From cf69f8d59b0a1fad2b0f313281647e3ea527cf5e Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 8 Jul 2024 02:11:20 -0700 Subject: [PATCH 001/332] Support MSC3916 by adding a federation /thumbnail endpoint and authenticated `_matrix/client/v1/media/thumbnail` endpoint (#17388) [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/pull/3916) added the endpoints `_matrix/federation/v1/media/thumbnail` and the authenticated `_matrix/client/v1/media/thumbnail`. This PR implements those endpoints, along with stabilizing `_matrix/client/v1/media/config` and `_matrix/client/v1/media/preview_url`. Complement tests are at https://github.com/matrix-org/complement/pull/728 --- changelog.d/17388.feature | 3 + synapse/config/experimental.py | 4 - .../federation/transport/server/__init__.py | 6 +- synapse/federation/transport/server/_base.py | 4 + .../federation/transport/server/federation.py | 56 +++ synapse/media/media_repository.py | 11 +- synapse/media/thumbnailer.py | 82 +++- synapse/rest/client/media.py | 43 ++- synapse/rest/media/thumbnail_resource.py | 19 +- tests/federation/test_federation_media.py | 110 ++++++ tests/media/test_media_storage.py | 20 +- tests/rest/client/test_media.py | 358 ++++++++++++++---- 12 files changed, 585 insertions(+), 131 deletions(-) create mode 100644 changelog.d/17388.feature diff --git a/changelog.d/17388.feature b/changelog.d/17388.feature new file mode 100644 index 0000000000..f04f49f085 --- /dev/null +++ b/changelog.d/17388.feature @@ -0,0 +1,3 @@ +Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) +by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the +remaining `_matrix/client/v1/media` endpoints. 
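As a quick illustration of what the stabilised API looks like from the client side, here is a minimal sketch. It is not part of the patch: the homeserver URL, media coordinates, and access token are placeholders, and `requests` is simply one convenient HTTP client. The `width`/`height`/`method` query parameters mirror the ones the servlets below parse.

```python
import requests

HOMESERVER = "https://matrix.example.org"  # placeholder homeserver URL
ACCESS_TOKEN = "syt_example_token"  # placeholder; the endpoint requires authentication
SERVER_NAME = "matrix.example.org"  # placeholder origin server of the media
MEDIA_ID = "abcdefg12345"  # placeholder media ID

# Request a 32x32 scaled thumbnail from the authenticated media endpoint.
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v1/media/thumbnail/{SERVER_NAME}/{MEDIA_ID}",
    params={"width": 32, "height": 32, "method": "scale"},
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
thumbnail_bytes = resp.content  # raw image bytes, e.g. image/png
```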
\ No newline at end of file diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1b72727b75..c21b7eb37e 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -437,10 +437,6 @@ class ExperimentalConfig(Config): "msc3823_account_suspension", False ) - self.msc3916_authenticated_media_enabled = experimental.get( - "msc3916_authenticated_media_enabled", False - ) - # MSC4151: Report room API (Client-Server API) self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index c44e5daa47..5f997040d0 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -33,6 +33,7 @@ from synapse.federation.transport.server.federation import ( FEDERATION_SERVLET_CLASSES, FederationAccountStatusServlet, FederationMediaDownloadServlet, + FederationMediaThumbnailServlet, FederationUnstableClientKeysClaimServlet, ) from synapse.http.server import HttpServer, JsonResource @@ -316,7 +317,10 @@ def register_servlets( ): continue - if servletclass == FederationMediaDownloadServlet: + if ( + servletclass == FederationMediaDownloadServlet + or servletclass == FederationMediaThumbnailServlet + ): if not hs.config.server.enable_media_repo: continue diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index e124481474..9094201da0 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -363,6 +363,8 @@ class BaseFederationServlet: if ( func.__self__.__class__.__name__ # type: ignore == "FederationMediaDownloadServlet" + or func.__self__.__class__.__name__ # type: ignore + == "FederationMediaThumbnailServlet" ): response = await func( origin, content, request, *args, **kwargs @@ -375,6 +377,8 @@ class BaseFederationServlet: if ( func.__self__.__class__.__name__ # type: ignore == "FederationMediaDownloadServlet" + or func.__self__.__class__.__name__ # type: ignore + == "FederationMediaThumbnailServlet" ): response = await func( origin, content, request, *args, **kwargs diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index ec957768d4..b075a86f68 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -46,11 +46,13 @@ from synapse.http.servlet import ( parse_boolean_from_args, parse_integer, parse_integer_from_args, + parse_string, parse_string_from_args, parse_strings_from_args, ) from synapse.http.site import SynapseRequest from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS +from synapse.media.thumbnailer import ThumbnailProvider from synapse.types import JsonDict from synapse.util import SYNAPSE_VERSION from synapse.util.ratelimitutils import FederationRateLimiter @@ -826,6 +828,59 @@ class FederationMediaDownloadServlet(BaseFederationServerServlet): ) +class FederationMediaThumbnailServlet(BaseFederationServerServlet): + """ + Implementation of new federation media `/thumbnail` endpoint outlined in MSC3916. Returns + a multipart/mixed response consisting of a JSON object and the requested media + item. This endpoint only returns local media. 
+ """ + + PATH = "/media/thumbnail/(?P[^/]*)" + RATELIMIT = True + + def __init__( + self, + hs: "HomeServer", + ratelimiter: FederationRateLimiter, + authenticator: Authenticator, + server_name: str, + ): + super().__init__(hs, authenticator, ratelimiter, server_name) + self.media_repo = self.hs.get_media_repository() + self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails + self.thumbnail_provider = ThumbnailProvider( + hs, self.media_repo, self.media_repo.media_storage + ) + + async def on_GET( + self, + origin: Optional[str], + content: Literal[None], + request: SynapseRequest, + media_id: str, + ) -> None: + + width = parse_integer(request, "width", required=True) + height = parse_integer(request, "height", required=True) + method = parse_string(request, "method", "scale") + # TODO Parse the Accept header to get an prioritised list of thumbnail types. + m_type = "image/png" + max_timeout_ms = parse_integer( + request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS + ) + max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS) + + if self.dynamic_thumbnails: + await self.thumbnail_provider.select_or_generate_local_thumbnail( + request, media_id, width, height, method, m_type, max_timeout_ms, True + ) + else: + await self.thumbnail_provider.respond_local_thumbnail( + request, media_id, width, height, method, m_type, max_timeout_ms, True + ) + self.media_repo.mark_recently_accessed(None, media_id) + + FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationSendServlet, FederationEventServlet, @@ -858,4 +913,5 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationMakeKnockServlet, FederationAccountStatusServlet, FederationMediaDownloadServlet, + FederationMediaThumbnailServlet, ) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 542642b900..87c929eb20 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -542,7 +542,12 @@ class MediaRepository: respond_404(request) async def get_remote_media_info( - self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str + self, + server_name: str, + media_id: str, + max_timeout_ms: int, + ip_address: str, + use_federation: bool, ) -> RemoteMedia: """Gets the media info associated with the remote file, downloading if necessary. @@ -553,6 +558,8 @@ class MediaRepository: max_timeout_ms: the maximum number of milliseconds to wait for the media to be uploaded. 
ip_address: IP address of the requester + use_federation: if a download is necessary, whether to request the remote file + over the federation `/download` endpoint Returns: The media info of the file @@ -573,7 +580,7 @@ class MediaRepository: max_timeout_ms, self.download_ratelimiter, ip_address, - False, + use_federation, ) # Ensure we actually use the responder so that it releases resources diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index f8a9560784..413a720e40 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -36,9 +36,11 @@ from synapse.media._base import ( ThumbnailInfo, respond_404, respond_with_file, + respond_with_multipart_responder, respond_with_responder, ) -from synapse.media.media_storage import MediaStorage +from synapse.media.media_storage import FileResponder, MediaStorage +from synapse.storage.databases.main.media_repository import LocalMedia if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository @@ -271,6 +273,7 @@ class ThumbnailProvider: method: str, m_type: str, max_timeout_ms: int, + for_federation: bool, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms @@ -290,6 +293,8 @@ class ThumbnailProvider: media_id, url_cache=bool(media_info.url_cache), server_name=None, + for_federation=for_federation, + media_info=media_info, ) async def select_or_generate_local_thumbnail( @@ -301,6 +306,7 @@ class ThumbnailProvider: desired_method: str, desired_type: str, max_timeout_ms: int, + for_federation: bool, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms @@ -326,10 +332,16 @@ class ThumbnailProvider: responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder( - request, responder, info.type, info.length - ) - return + if for_federation: + await respond_with_multipart_responder( + self.hs.get_clock(), request, responder, media_info + ) + return + else: + await respond_with_responder( + request, responder, info.type, info.length + ) + return logger.debug("We don't have a thumbnail of that size. Generating") @@ -344,7 +356,15 @@ class ThumbnailProvider: ) if file_path: - await respond_with_file(request, desired_type, file_path) + if for_federation: + await respond_with_multipart_responder( + self.hs.get_clock(), + request, + FileResponder(open(file_path, "rb")), + media_info, + ) + else: + await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") raise SynapseError(400, "Failed to generate thumbnail.") @@ -360,9 +380,10 @@ class ThumbnailProvider: desired_type: str, max_timeout_ms: int, ip_address: str, + use_federation: bool, ) -> None: media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address + server_name, media_id, max_timeout_ms, ip_address, use_federation ) if not media_info: respond_404(request) @@ -424,12 +445,13 @@ class ThumbnailProvider: m_type: str, max_timeout_ms: int, ip_address: str, + use_federation: bool, ) -> None: # TODO: Don't download the whole remote file # We should proxy the thumbnail from the remote server instead of # downloading the remote file and generating our own thumbnails. 
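The `for_federation` branches above wrap the thumbnail in a multipart/mixed body: a JSON metadata part followed by the media part. As a rough sketch of how a consumer might take such a body apart, assuming the response has been fully buffered and the boundary string extracted from the `Content-Type` header (the tests added later in this patch split on the boundary in essentially the same way):

```python
from typing import List, Tuple


def split_multipart_body(body: bytes, boundary: str) -> List[Tuple[bytes, bytes]]:
    """Split a buffered multipart/mixed body into (part_headers, payload) pairs.

    Illustration only: no streaming and no defence against malformed input.
    """
    delimiter = b"\r\n--" + boundary.encode("utf-8")
    parts: List[Tuple[bytes, bytes]] = []
    for chunk in body.split(delimiter):
        # Each real part reads "<part headers>\r\n\r\n<payload>"; the preamble
        # and the closing "--" marker contain no blank line and are skipped.
        headers, sep, payload = chunk.partition(b"\r\n\r\n")
        if sep:
            parts.append((headers, payload))
    return parts
```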
media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address + server_name, media_id, max_timeout_ms, ip_address, use_federation ) if not media_info: return @@ -448,6 +470,7 @@ class ThumbnailProvider: media_info.filesystem_id, url_cache=False, server_name=server_name, + for_federation=False, ) async def _select_and_respond_with_thumbnail( @@ -461,7 +484,9 @@ class ThumbnailProvider: media_id: str, file_id: str, url_cache: bool, + for_federation: bool, server_name: Optional[str] = None, + media_info: Optional[LocalMedia] = None, ) -> None: """ Respond to a request with an appropriate thumbnail from the previously generated thumbnails. @@ -476,6 +501,8 @@ class ThumbnailProvider: file_id: The ID of the media that a thumbnail is being requested for. url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. + for_federation: whether the request is from the federation /thumbnail request + media_info: metadata about the media being requested. """ logger.debug( "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s", @@ -511,13 +538,20 @@ class ThumbnailProvider: responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder( - request, - responder, - file_info.thumbnail.type, - file_info.thumbnail.length, - ) - return + if for_federation: + assert media_info is not None + await respond_with_multipart_responder( + self.hs.get_clock(), request, responder, media_info + ) + return + else: + await respond_with_responder( + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + ) + return # If we can't find the thumbnail we regenerate it. This can happen # if e.g. we've deleted the thumbnails but still have the original @@ -558,12 +592,18 @@ class ThumbnailProvider: ) responder = await self.media_storage.fetch_media(file_info) - await respond_with_responder( - request, - responder, - file_info.thumbnail.type, - file_info.thumbnail.length, - ) + if for_federation: + assert media_info is not None + await respond_with_multipart_responder( + self.hs.get_clock(), request, responder, media_info + ) + else: + await respond_with_responder( + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + ) else: # This might be because: # 1. We can't create thumbnails for the given media (corrupted or diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py index c0ae5dd66f..c30e3022de 100644 --- a/synapse/rest/client/media.py +++ b/synapse/rest/client/media.py @@ -47,7 +47,7 @@ from synapse.util.stringutils import parse_and_validate_server_name logger = logging.getLogger(__name__) -class UnstablePreviewURLServlet(RestServlet): +class PreviewURLServlet(RestServlet): """ Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix @@ -65,9 +65,7 @@ class UnstablePreviewURLServlet(RestServlet): * Matrix cannot be used to distribute the metadata between homeservers. 
""" - PATTERNS = [ - re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$") - ] + PATTERNS = [re.compile(r"^/_matrix/client/v1/media/preview_url$")] def __init__( self, @@ -95,10 +93,8 @@ class UnstablePreviewURLServlet(RestServlet): respond_with_json_bytes(request, 200, og, send_cors=True) -class UnstableMediaConfigResource(RestServlet): - PATTERNS = [ - re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$") - ] +class MediaConfigResource(RestServlet): + PATTERNS = [re.compile(r"^/_matrix/client/v1/media/config$")] def __init__(self, hs: "HomeServer"): super().__init__() @@ -112,10 +108,10 @@ class UnstableMediaConfigResource(RestServlet): respond_with_json(request, 200, self.limits_dict, send_cors=True) -class UnstableThumbnailResource(RestServlet): +class ThumbnailResource(RestServlet): PATTERNS = [ re.compile( - "/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P[^/]*)/(?P[^/]*)$" + "/_matrix/client/v1/media/thumbnail/(?P[^/]*)/(?P[^/]*)$" ) ] @@ -159,11 +155,25 @@ class UnstableThumbnailResource(RestServlet): if self._is_mine_server_name(server_name): if self.dynamic_thumbnails: await self.thumbnailer.select_or_generate_local_thumbnail( - request, media_id, width, height, method, m_type, max_timeout_ms + request, + media_id, + width, + height, + method, + m_type, + max_timeout_ms, + False, ) else: await self.thumbnailer.respond_local_thumbnail( - request, media_id, width, height, method, m_type, max_timeout_ms + request, + media_id, + width, + height, + method, + m_type, + max_timeout_ms, + False, ) self.media_repo.mark_recently_accessed(None, media_id) else: @@ -191,6 +201,7 @@ class UnstableThumbnailResource(RestServlet): m_type, max_timeout_ms, ip_address, + True, ) self.media_repo.mark_recently_accessed(server_name, media_id) @@ -260,11 +271,9 @@ class DownloadResource(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: media_repo = hs.get_media_repository() if hs.config.media.url_preview_enabled: - UnstablePreviewURLServlet(hs, media_repo, media_repo.media_storage).register( + PreviewURLServlet(hs, media_repo, media_repo.media_storage).register( http_server ) - UnstableMediaConfigResource(hs).register(http_server) - UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register( - http_server - ) + MediaConfigResource(hs).register(http_server) + ThumbnailResource(hs, media_repo, media_repo.media_storage).register(http_server) DownloadResource(hs, media_repo).register(http_server) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index ce511c6dce..70354aa439 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -88,11 +88,25 @@ class ThumbnailResource(RestServlet): if self._is_mine_server_name(server_name): if self.dynamic_thumbnails: await self.thumbnail_provider.select_or_generate_local_thumbnail( - request, media_id, width, height, method, m_type, max_timeout_ms + request, + media_id, + width, + height, + method, + m_type, + max_timeout_ms, + False, ) else: await self.thumbnail_provider.respond_local_thumbnail( - request, media_id, width, height, method, m_type, max_timeout_ms + request, + media_id, + width, + height, + method, + m_type, + max_timeout_ms, + False, ) self.media_repo.mark_recently_accessed(None, media_id) else: @@ -120,5 +134,6 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, ip_address, + False, ) 
self.media_repo.mark_recently_accessed(server_name, media_id) diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py index 142f73cfdb..0dcf20f5f5 100644 --- a/tests/federation/test_federation_media.py +++ b/tests/federation/test_federation_media.py @@ -35,6 +35,7 @@ from synapse.types import UserID from synapse.util import Clock from tests import unittest +from tests.media.test_media_storage import small_png from tests.test_utils import SMALL_PNG @@ -146,3 +147,112 @@ class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase): # check that the png file exists and matches what was uploaded found_file = any(SMALL_PNG in field for field in stripped_bytes) self.assertTrue(found_file) + + +class FederationThumbnailTest(unittest.FederatingHomeserverTestCase): + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") + self.addCleanup(shutil.rmtree, self.test_dir) + self.primary_base_path = os.path.join(self.test_dir, "primary") + self.secondary_base_path = os.path.join(self.test_dir, "secondary") + + hs.config.media.media_store_path = self.primary_base_path + + storage_providers = [ + StorageProviderWrapper( + FileStorageProviderBackend(hs, self.secondary_base_path), + store_local=True, + store_remote=False, + store_synchronous=True, + ) + ] + + self.filepaths = MediaFilePaths(self.primary_base_path) + self.media_storage = MediaStorage( + hs, self.primary_base_path, self.filepaths, storage_providers + ) + self.media_repo = hs.get_media_repository() + + def test_thumbnail_download_scaled(self) -> None: + content = io.BytesIO(small_png.data) + content_uri = self.get_success( + self.media_repo.create_content( + "image/png", + "test_png_thumbnail", + content, + 67, + UserID.from_string("@user_id:whatever.org"), + ) + ) + # test with an image file + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=scale", + ) + self.pump() + self.assertEqual(200, channel.code) + + content_type = channel.headers.getRawHeaders("content-type") + assert content_type is not None + assert "multipart/mixed" in content_type[0] + assert "boundary" in content_type[0] + + # extract boundary + boundary = content_type[0].split("boundary=")[1] + # split on boundary and check that json field and expected value exist + body = channel.result.get("body") + assert body is not None + stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8")) + found_json = any( + b"\r\nContent-Type: application/json\r\n\r\n{}" in field + for field in stripped_bytes + ) + self.assertTrue(found_json) + + # check that the png file exists and matches the expected scaled bytes + found_file = any(small_png.expected_scaled in field for field in stripped_bytes) + self.assertTrue(found_file) + + def test_thumbnail_download_cropped(self) -> None: + content = io.BytesIO(small_png.data) + content_uri = self.get_success( + self.media_repo.create_content( + "image/png", + "test_png_thumbnail", + content, + 67, + UserID.from_string("@user_id:whatever.org"), + ) + ) + # test with an image file + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=crop", + ) + self.pump() + self.assertEqual(200, channel.code) + + content_type = channel.headers.getRawHeaders("content-type") + assert 
content_type is not None + assert "multipart/mixed" in content_type[0] + assert "boundary" in content_type[0] + + # extract boundary + boundary = content_type[0].split("boundary=")[1] + # split on boundary and check that json field and expected value exist + body = channel.result.get("body") + assert body is not None + stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8")) + found_json = any( + b"\r\nContent-Type: application/json\r\n\r\n{}" in field + for field in stripped_bytes + ) + self.assertTrue(found_json) + + # check that the png file exists and matches the expected cropped bytes + found_file = any( + small_png.expected_cropped in field for field in stripped_bytes + ) + self.assertTrue(found_file) diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 024086b775..70912e22f8 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -import itertools import os import shutil import tempfile @@ -227,19 +226,15 @@ test_images = [ empty_file, SVG, ] -urls = [ - "_matrix/media/r0/thumbnail", - "_matrix/client/unstable/org.matrix.msc3916/media/thumbnail", -] +input_values = [(x,) for x in test_images] -@parameterized_class(("test_image", "url"), itertools.product(test_images, urls)) +@parameterized_class(("test_image",), input_values) class MediaRepoTests(unittest.HomeserverTestCase): servlets = [media.register_servlets] test_image: ClassVar[TestImage] hijack_auth = True user_id = "@test:user" - url: ClassVar[str] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.fetches: List[ @@ -304,7 +299,6 @@ class MediaRepoTests(unittest.HomeserverTestCase): "config": {"directory": self.storage_path}, } config["media_storage_providers"] = [provider_config] - config["experimental_features"] = {"msc3916_authenticated_media_enabled": True} hs = self.setup_test_homeserver(config=config, federation_http_client=client) @@ -509,7 +503,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): params = "?width=32&height=32&method=scale" channel = self.make_request( "GET", - f"/{self.url}/{self.media_id}{params}", + f"/_matrix/media/r0/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -537,7 +531,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"/{self.url}/{self.media_id}{params}", + f"/_matrix/media/r0/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -573,7 +567,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): params = "?width=32&height=32&method=" + method channel = self.make_request( "GET", - f"/{self.url}/{self.media_id}{params}", + f"/_matrix/media/r0/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -608,7 +602,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.json_body, { "errcode": "M_UNKNOWN", - "error": f"Cannot find any thumbnails for the requested media ('/{self.url}/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", + "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). 
This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", }, ) else: @@ -618,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.json_body, { "errcode": "M_NOT_FOUND", - "error": f"Not found '/{self.url}/example.com/12345'", + "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'", }, ) diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 6b5af2dbb6..7f2caed7d5 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -23,12 +23,15 @@ import io import json import os import re -from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Sequence, Tuple, Type +import shutil +from typing import Any, BinaryIO, Dict, List, Optional, Sequence, Tuple, Type from unittest.mock import MagicMock, Mock, patch from urllib import parse from urllib.parse import quote, urlencode -from parameterized import parameterized_class +from parameterized import parameterized, parameterized_class +from PIL import Image as Image +from typing_extensions import ClassVar from twisted.internet import defer from twisted.internet._resolver import HostResolution @@ -40,7 +43,6 @@ from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from twisted.web.http_headers import Headers from twisted.web.iweb import UNKNOWN_LENGTH, IResponse -from twisted.web.resource import Resource from synapse.api.errors import HttpResponseException from synapse.api.ratelimiting import Ratelimiter @@ -48,7 +50,8 @@ from synapse.config.oembed import OEmbedEndpointConfig from synapse.http.client import MultipartResponse from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable -from synapse.media._base import FileInfo +from synapse.media._base import FileInfo, ThumbnailInfo +from synapse.media.thumbnailer import ThumbnailProvider from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS from synapse.rest import admin from synapse.rest.client import login, media @@ -76,7 +79,7 @@ except ImportError: lxml = None # type: ignore[assignment] -class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase): +class MediaDomainBlockingTests(unittest.HomeserverTestCase): remote_media_id = "doesnotmatter" remote_server_name = "evil.com" servlets = [ @@ -144,7 +147,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase): # Should result in a 404. "prevent_media_downloads_from": ["evil.com"], "dynamic_thumbnails": True, - "experimental_features": {"msc3916_authenticated_media_enabled": True}, } ) def test_cannot_download_blocked_media_thumbnail(self) -> None: @@ -153,7 +155,7 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase): """ response = self.make_request( "GET", - f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", shorthand=False, content={"width": 100, "height": 100}, access_token=self.tok, @@ -166,7 +168,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase): # This proves we haven't broken anything. 
"prevent_media_downloads_from": ["not-listed.com"], "dynamic_thumbnails": True, - "experimental_features": {"msc3916_authenticated_media_enabled": True}, } ) def test_remote_media_thumbnail_normally_unblocked(self) -> None: @@ -175,14 +176,14 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase): """ response = self.make_request( "GET", - f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", shorthand=False, access_token=self.tok, ) self.assertEqual(response.code, 200) -class UnstableURLPreviewTests(unittest.HomeserverTestCase): +class URLPreviewTests(unittest.HomeserverTestCase): if not lxml: skip = "url preview feature requires lxml" @@ -198,7 +199,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() - config["experimental_features"] = {"msc3916_authenticated_media_enabled": True} config["url_preview_enabled"] = True config["max_spider_size"] = 9999999 config["url_preview_ip_range_blacklist"] = ( @@ -284,18 +284,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_resource_dict(self) -> Dict[str, Resource]: - """Create a resource tree for the test server - - A resource tree is a mapping from path to twisted.web.resource. - - The default implementation creates a JsonResource and calls each function in - `servlets` to register servlets against it. - """ - resources = super().create_resource_dict() - resources["/_matrix/media"] = self.hs.get_media_repository_resource() - return resources - def _assert_small_png(self, json_body: JsonDict) -> None: """Assert properties from the SMALL_PNG test image.""" self.assertTrue(json_body["og:image"].startswith("mxc://")) @@ -309,7 +297,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -334,7 +322,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): # Check the cache returns the correct response channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, ) @@ -352,7 +340,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): # Check the database cache returns the correct response channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, ) @@ -375,7 +363,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -405,7 +393,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, 
await_result=False, ) @@ -441,7 +429,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -482,7 +470,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -517,7 +505,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -550,7 +538,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -580,7 +568,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) @@ -603,7 +591,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) @@ -622,7 +610,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): """ channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://192.168.1.1", + "/_matrix/client/v1/media/preview_url?url=http://192.168.1.1", shorthand=False, ) @@ -640,7 +628,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): """ channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://1.1.1.2", + "/_matrix/client/v1/media/preview_url?url=http://1.1.1.2", shorthand=False, ) @@ -659,7 +647,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -696,7 +684,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) self.assertEqual(channel.code, 502) @@ -718,7 +706,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) @@ -741,7 +729,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + 
"/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) @@ -760,7 +748,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): """ channel = self.make_request( "OPTIONS", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, ) self.assertEqual(channel.code, 204) @@ -774,7 +762,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): # Build and make a request to the server channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com", + "/_matrix/client/v1/media/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -827,7 +815,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -877,7 +865,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -919,7 +907,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -959,7 +947,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -1000,7 +988,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?{query_params}", + f"/_matrix/client/v1/media/preview_url?{query_params}", shorthand=False, ) self.pump() @@ -1021,7 +1009,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org", + "/_matrix/client/v1/media/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -1058,7 +1046,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1118,7 +1106,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1167,7 +1155,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.hulu.com/watch/12345", + 
"/_matrix/client/v1/media/preview_url?url=http://www.hulu.com/watch/12345", shorthand=False, await_result=False, ) @@ -1212,7 +1200,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1241,7 +1229,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1333,7 +1321,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1374,7 +1362,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://cdn.twitter.com/matrixdotorg", + "/_matrix/client/v1/media/preview_url?url=http://cdn.twitter.com/matrixdotorg", shorthand=False, await_result=False, ) @@ -1416,7 +1404,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{host}/{media_id}", + f"/_matrix/client/v1/media/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1429,7 +1417,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{host}/{media_id}", + f"/_matrix/client/v1/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1464,7 +1452,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1482,7 +1470,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1532,8 +1520,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=" - + bad_url, + "/_matrix/client/v1/media/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) @@ -1542,8 +1529,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=" - + good_url, + "/_matrix/client/v1/media/preview_url?url=" + good_url, shorthand=False, await_result=False, ) @@ -1575,8 +1561,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): channel = 
self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=" - + bad_url, + "/_matrix/client/v1/media/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) @@ -1584,7 +1569,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 403, channel.result) -class UnstableMediaConfigTest(unittest.HomeserverTestCase): +class MediaConfigTest(unittest.HomeserverTestCase): servlets = [ media.register_servlets, admin.register_servlets, @@ -1595,7 +1580,6 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase): self, reactor: ThreadedMemoryReactorClock, clock: Clock ) -> HomeServer: config = self.default_config() - config["experimental_features"] = {"msc3916_authenticated_media_enabled": True} self.storage_path = self.mktemp() self.media_store_path = self.mktemp() @@ -1622,7 +1606,7 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase): def test_media_config(self) -> None: channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc3916/media/config", + "/_matrix/client/v1/media/config", shorthand=False, access_token=self.tok, ) @@ -1899,7 +1883,7 @@ input_values = [(x,) for x in test_images] @parameterized_class(("test_image",), input_values) -class DownloadTestCase(unittest.HomeserverTestCase): +class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): test_image: ClassVar[TestImage] servlets = [ media.register_servlets, @@ -2005,7 +1989,6 @@ class DownloadTestCase(unittest.HomeserverTestCase): "config": {"directory": self.storage_path}, } config["media_storage_providers"] = [provider_config] - config["experimental_features"] = {"msc3916_authenticated_media_enabled": True} hs = self.setup_test_homeserver(config=config, federation_http_client=client) @@ -2164,7 +2147,7 @@ class DownloadTestCase(unittest.HomeserverTestCase): def test_unknown_federation_endpoint(self) -> None: """ - Test that if the downloadd request to remote federation endpoint returns a 404 + Test that if the download request to remote federation endpoint returns a 404 we fall back to the _matrix/media endpoint """ channel = self.make_request( @@ -2210,3 +2193,236 @@ class DownloadTestCase(unittest.HomeserverTestCase): self.pump() self.assertEqual(channel.code, 200) + + def test_thumbnail_crop(self) -> None: + """Test that a cropped remote thumbnail is available.""" + self._test_thumbnail( + "crop", + self.test_image.expected_cropped, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + def test_thumbnail_scale(self) -> None: + """Test that a scaled remote thumbnail is available.""" + self._test_thumbnail( + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + def test_invalid_type(self) -> None: + """An invalid thumbnail type is never available.""" + self._test_thumbnail( + "invalid", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + @unittest.override_config( + {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} + ) + def test_no_thumbnail_crop(self) -> None: + """ + Override the config to generate only scaled thumbnails, but request a cropped one. 
+ """ + self._test_thumbnail( + "crop", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + @unittest.override_config( + {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} + ) + def test_no_thumbnail_scale(self) -> None: + """ + Override the config to generate only cropped thumbnails, but request a scaled one. + """ + self._test_thumbnail( + "scale", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + def test_thumbnail_repeated_thumbnail(self) -> None: + """Test that fetching the same thumbnail works, and deleting the on disk + thumbnail regenerates it. + """ + self._test_thumbnail( + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) + + if not self.test_image.expected_found: + return + + # Fetching again should work, without re-requesting the image from the + # remote. + params = "?width=32&height=32&method=scale" + channel = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}", + shorthand=False, + await_result=False, + access_token=self.tok, + ) + self.pump() + + self.assertEqual(channel.code, 200) + if self.test_image.expected_scaled: + self.assertEqual( + channel.result["body"], + self.test_image.expected_scaled, + channel.result["body"], + ) + + # Deleting the thumbnail on disk then re-requesting it should work as + # Synapse should regenerate missing thumbnails. + info = self.get_success( + self.store.get_cached_remote_media(self.remote, self.media_id) + ) + assert info is not None + file_id = info.filesystem_id + + thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( + self.remote, file_id + ) + shutil.rmtree(thumbnail_dir, ignore_errors=True) + + channel = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}", + shorthand=False, + await_result=False, + access_token=self.tok, + ) + self.pump() + + self.assertEqual(channel.code, 200) + if self.test_image.expected_scaled: + self.assertEqual( + channel.result["body"], + self.test_image.expected_scaled, + channel.result["body"], + ) + + def _test_thumbnail( + self, + method: str, + expected_body: Optional[bytes], + expected_found: bool, + unable_to_thumbnail: bool = False, + ) -> None: + """Test the given thumbnailing method works as expected. + + Args: + method: The thumbnailing method to use (crop, scale). + expected_body: The expected bytes from thumbnailing, or None if + test should just check for a valid image. + expected_found: True if the file should exist on the server, or False if + a 404/400 is expected. + unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or + False if the thumbnailing should succeed or a normal 404 is expected. 
+ """ + + params = "?width=32&height=32&method=" + method + channel = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}", + shorthand=False, + await_result=False, + access_token=self.tok, + ) + self.pump() + headers = { + b"Content-Length": [b"%d" % (len(self.test_image.data))], + b"Content-Type": [self.test_image.content_type], + } + self.fetches[0][0].callback( + (self.test_image.data, (len(self.test_image.data), headers)) + ) + self.pump() + if expected_found: + self.assertEqual(channel.code, 200) + + self.assertEqual( + channel.headers.getRawHeaders(b"Cross-Origin-Resource-Policy"), + [b"cross-origin"], + ) + + if expected_body is not None: + self.assertEqual( + channel.result["body"], expected_body, channel.result["body"] + ) + else: + # ensure that the result is at least some valid image + Image.open(io.BytesIO(channel.result["body"])) + elif unable_to_thumbnail: + # A 400 with a JSON body. + self.assertEqual(channel.code, 400) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "Cannot find any thumbnails for the requested media ('/_matrix/client/v1/media/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", + }, + ) + else: + # A 404 with a JSON body. + self.assertEqual(channel.code, 404) + self.assertEqual( + channel.json_body, + { + "errcode": "M_NOT_FOUND", + "error": "Not found '/_matrix/client/v1/media/thumbnail/example.com/12345'", + }, + ) + + @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)]) + def test_same_quality(self, method: str, desired_size: int) -> None: + """Test that choosing between thumbnails with the same quality rating succeeds. + + We are not particular about which thumbnail is chosen.""" + + content_type = self.test_image.content_type.decode() + media_repo = self.hs.get_media_repository() + thumbnail_provider = ThumbnailProvider( + self.hs, media_repo, media_repo.media_storage + ) + + self.assertIsNotNone( + thumbnail_provider._select_thumbnail( + desired_width=desired_size, + desired_height=desired_size, + desired_method=method, + desired_type=content_type, + # Provide two identical thumbnails which are guaranteed to have the same + # quality rating. + thumbnail_infos=[ + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), + ], + file_id=f"image{self.test_image.extension.decode()}", + url_cache=False, + server_name=None, + ) + ) From 4d7e53707c82e0cbb8ea4c00a97339bba0aecd56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 10:32:09 +0100 Subject: [PATCH 002/332] Bump certifi from 2023.7.22 to 2024.7.4 (#17404) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3a322b773e..7838c3d308 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -182,13 +182,13 @@ files = [ [[package]] name = "certifi" -version = "2023.7.22" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] From c896030f679ad4987df015970a0c55aa4ffe8466 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 8 Jul 2024 14:08:11 +0200 Subject: [PATCH 003/332] MSC3861: allow overriding the introspection endpoint (#17406) This makes it easier to go through an internal endpoint instead of the public facing URL when introspecting tokens, reducing latency. --- changelog.d/17406.misc | 1 + synapse/api/auth/msc3861_delegated.py | 15 +++++++++++++-- synapse/config/experimental.py | 6 ++++++ 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17406.misc diff --git a/changelog.d/17406.misc b/changelog.d/17406.misc new file mode 100644 index 0000000000..83f34cac43 --- /dev/null +++ b/changelog.d/17406.misc @@ -0,0 +1 @@ +MSC3861: allow overriding the introspection endpoint. diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index f61b39ded7..7361666c77 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -145,6 +145,18 @@ class MSC3861DelegatedAuth(BaseAuth): # metadata.validate_introspection_endpoint() return metadata + async def _introspection_endpoint(self) -> str: + """ + Returns the introspection endpoint of the issuer + + It uses the config option if set, otherwise it will use OIDC discovery to get it + """ + if self._config.introspection_endpoint is not None: + return self._config.introspection_endpoint + + metadata = await self._load_metadata() + return metadata.get("introspection_endpoint") + async def _introspect_token(self, token: str) -> IntrospectionToken: """ Send a token to the introspection endpoint and returns the introspection response @@ -161,8 +173,7 @@ class MSC3861DelegatedAuth(BaseAuth): Returns: The introspection response """ - metadata = await self._issuer_metadata.get() - introspection_endpoint = metadata.get("introspection_endpoint") + introspection_endpoint = await self._introspection_endpoint() raw_headers: Dict[str, str] = { "Content-Type": "application/x-www-form-urlencoded", "User-Agent": str(self._http_client.user_agent, "utf-8"), diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index c21b7eb37e..bae9cc8047 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -140,6 +140,12 @@ class MSC3861: ("experimental", "msc3861", "client_auth_method"), ) + introspection_endpoint: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """The URL of the introspection endpoint used to validate access tokens.""" + account_management_url: Optional[str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str)), From ccbd619b430ad5ddf3f11de886559cb2dc5024ca 
Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 8 Jul 2024 07:37:28 -0600 Subject: [PATCH 004/332] Declare support for Matrix 1.11 (#17403) Previous: https://github.com/element-hq/synapse/pull/17082 Fixes https://github.com/element-hq/synapse/issues/17402 See https://github.com/element-hq/synapse/issues/17402 for context **Blocked on https://github.com/element-hq/synapse/pull/17388** (required for spec compliance) --- changelog.d/17403.feature | 1 + synapse/rest/client/versions.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/17403.feature diff --git a/changelog.d/17403.feature b/changelog.d/17403.feature new file mode 100644 index 0000000000..b1868d7608 --- /dev/null +++ b/changelog.d/17403.feature @@ -0,0 +1 @@ +Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index e01e5f542a..84cf388bd4 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -102,6 +102,7 @@ class VersionsRestServlet(RestServlet): "v1.8", "v1.9", "v1.10", + "v1.11", ], # as per MSC1497: "unstable_features": { From 7879f288df0b6970523f66eb851c6ec640d1ac27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 15:24:01 +0100 Subject: [PATCH 005/332] Bump pydantic from 2.7.1 to 2.8.2 (#17415) --- poetry.lock | 183 ++++++++++++++++++++++++++++------------------------ 1 file changed, 98 insertions(+), 85 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7838c3d308..9bbbd919a6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1760,109 +1760,122 @@ files = [ [[package]] name = "pydantic" -version = "2.7.1" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = 
"pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", 
hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] From 472117724137579a03d05504cab9127172e4f8ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 15:24:11 +0100 Subject: [PATCH 006/332] Bump mypy-zope from 1.0.4 to 1.0.5 (#17414) --- poetry.lock | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9bbbd919a6..301205ec83 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1442,17 +1442,16 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.4" +version = "1.0.5" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy-zope-1.0.4.tar.gz", hash = "sha256:a9569e73ae85a65247787d98590fa6d4290e76f26aabe035d1c3e94a0b9ab6ee"}, - {file = "mypy_zope-1.0.4-py3-none-any.whl", hash = "sha256:c7298f93963a84f2b145c2b5cc98709fc2a5be4adf54bfe23fa7fdd8fd19c975"}, + {file = "mypy_zope-1.0.5.tar.gz", hash = "sha256:2440406d49c0e1199c1cd819c92a2c4957de65579c6abc8a081c927f4bdc8d49"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.10.0" +mypy = ">=1.0.0,<1.11.0" "zope.interface" = "*" "zope.schema" = "*" From 23eed4f72abebfa9c7518d77249f9b8f82703fbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 15:24:21 +0100 Subject: [PATCH 007/332] Bump serde from 1.0.203 to 1.0.204 (#17409) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4353e55977..84563699a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" 
-version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", From 62134dcc77b34631dc43c6ae976d1cebdabc5181 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 15:24:30 +0100 Subject: [PATCH 008/332] Bump serde_json from 1.0.119 to 1.0.120 (#17408) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84563699a5..3a8bf7a49c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", From 3fef535ff21436cc17a26b0ec9331cf89901c9e3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 8 Jul 2024 13:17:08 -0500 Subject: [PATCH 009/332] Add `rooms.bump_stamp` to Sliding Sync `/sync` for easier client-side sorting (#17395) `bump_stamp` corresponds to the `stream_ordering` of the latest `DEFAULT_BUMP_EVENT_TYPES` in the room. This helps clients sort more readily without them needing to pull in a bunch of the timeline to determine the last activity. `bump_event_types` is a thing because for example, we don't want display name changes to mark the room as unread and bump it to the top. For encrypted rooms, we just have to consider any activity as a bump because we can't see the content and the client has to figure it out for themselves. Outside of Synapse, `bump_stamp` is just a free-form counter so other implementations could use `received_ts`or `origin_server_ts` (see the [*Security considerations* section in MSC3575 about the potential pitfalls of using `origin_server_ts`](https://github.com/matrix-org/matrix-spec-proposals/blob/kegan/sync-v3/proposals/3575-sync.md#security-considerations)). It doesn't have any guarantee about always going up. In the Synapse case, it could go down if an event was redacted/removed (or purged in cases of retention policies). In the future, we could add `bump_event_types` as [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) mentions if people need to customize the event types. --- In the Sliding Sync proxy, a similar [`timestamp` field was added](https://github.com/matrix-org/sliding-sync/pull/247) for the same purpose but the name is not obvious what it pertains to or what it's for. 
The `timestamp` field was also added to Ruma in https://github.com/ruma/ruma/pull/1622 --- changelog.d/17395.feature | 1 + synapse/api/constants.py | 4 + synapse/handlers/sliding_sync.py | 75 +++++++++++++----- synapse/rest/client/sync.py | 1 + synapse/storage/databases/main/stream.py | 35 ++++++--- synapse/types/handlers/__init__.py | 8 ++ tests/handlers/test_sliding_sync.py | 68 ++++++++++++++++- tests/rest/client/test_sync.py | 96 ++++++++++++++++++++++++ tests/storage/test_stream.py | 41 ++++++++++ 9 files changed, 295 insertions(+), 34 deletions(-) create mode 100644 changelog.d/17395.feature diff --git a/changelog.d/17395.feature b/changelog.d/17395.feature new file mode 100644 index 0000000000..0c95b9f4a9 --- /dev/null +++ b/changelog.d/17395.feature @@ -0,0 +1 @@ +Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 9265a271d2..12d18137e0 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -128,9 +128,13 @@ class EventTypes: SpaceParent: Final = "m.space.parent" Reaction: Final = "m.reaction" + Sticker: Final = "m.sticker" + LiveLocationShareStart: Final = "m.beacon_info" CallInvite: Final = "m.call.invite" + PollStart: Final = "m.poll.start" + class ToDeviceEventTypes: RoomKeyRequest: Final = "m.room_key_request" diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index a1ddac903e..8e2f751c02 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -54,6 +54,17 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# The event types that clients should consider as new activity. +DEFAULT_BUMP_EVENT_TYPES = { + EventTypes.Message, + EventTypes.Encrypted, + EventTypes.Sticker, + EventTypes.CallInvite, + EventTypes.PollStart, + EventTypes.LiveLocationShareStart, +} + + def filter_membership_for_sync( *, membership: str, user_id: str, sender: Optional[str] ) -> bool: @@ -285,6 +296,7 @@ class _RoomMembershipForUser: range """ + room_id: str event_id: Optional[str] event_pos: PersistedEventPosition membership: str @@ -469,7 +481,9 @@ class SlidingSyncHandler: # # Both sides of range are inclusive so we `+ 1` max_num_rooms = range[1] - range[0] + 1 - for room_id, _ in sorted_room_info[range[0] :]: + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + if len(room_ids_in_list) >= max_num_rooms: break @@ -519,7 +533,7 @@ class SlidingSyncHandler: user=sync_config.user, room_id=room_id, room_sync_config=room_sync_config, - rooms_membership_for_user_at_to_token=sync_room_map[room_id], + room_membership_for_user_at_to_token=sync_room_map[room_id], from_token=from_token, to_token=to_token, ) @@ -591,6 +605,7 @@ class SlidingSyncHandler: # (below) because they are potentially from the current snapshot time # instead from the time of the `to_token`. 
room_for_user.room_id: _RoomMembershipForUser( + room_id=room_for_user.room_id, event_id=room_for_user.event_id, event_pos=room_for_user.event_pos, membership=room_for_user.membership, @@ -691,6 +706,7 @@ class SlidingSyncHandler: is not None ): sync_room_id_set[room_id] = _RoomMembershipForUser( + room_id=room_id, event_id=first_membership_change_after_to_token.prev_event_id, event_pos=first_membership_change_after_to_token.prev_event_pos, membership=first_membership_change_after_to_token.prev_membership, @@ -785,6 +801,7 @@ class SlidingSyncHandler: # is their own leave event if last_membership_change_in_from_to_range.membership == Membership.LEAVE: filtered_sync_room_id_set[room_id] = _RoomMembershipForUser( + room_id=room_id, event_id=last_membership_change_in_from_to_range.event_id, event_pos=last_membership_change_in_from_to_range.event_pos, membership=last_membership_change_in_from_to_range.membership, @@ -969,7 +986,7 @@ class SlidingSyncHandler: self, sync_room_map: Dict[str, _RoomMembershipForUser], to_token: StreamToken, - ) -> List[Tuple[str, _RoomMembershipForUser]]: + ) -> List[_RoomMembershipForUser]: """ Sort by `stream_ordering` of the last event that the user should see in the room. `stream_ordering` is unique so we get a stable sort. @@ -1007,12 +1024,17 @@ class SlidingSyncHandler: else: # Otherwise, if the user has left/been invited/knocked/been banned from # a room, they shouldn't see anything past that point. + # + # FIXME: It's possible that people should see beyond this point in + # invited/knocked cases if for example the room has + # `invite`/`world_readable` history visibility, see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 last_activity_in_room_map[room_id] = room_for_user.event_pos.stream return sorted( - sync_room_map.items(), + sync_room_map.values(), # Sort by the last activity (stream_ordering) in the room - key=lambda room_info: last_activity_in_room_map[room_info[0]], + key=lambda room_info: last_activity_in_room_map[room_info.room_id], # We want descending order reverse=True, ) @@ -1022,7 +1044,7 @@ class SlidingSyncHandler: user: UserID, room_id: str, room_sync_config: RoomSyncConfig, - rooms_membership_for_user_at_to_token: _RoomMembershipForUser, + room_membership_for_user_at_to_token: _RoomMembershipForUser, from_token: Optional[StreamToken], to_token: StreamToken, ) -> SlidingSyncResult.RoomResult: @@ -1036,7 +1058,7 @@ class SlidingSyncHandler: room_id: The room ID to fetch data for room_sync_config: Config for what data we should fetch for a room in the sync response. - rooms_membership_for_user_at_to_token: Membership information for the user + room_membership_for_user_at_to_token: Membership information for the user in the room at the time of `to_token`. from_token: The point in the stream to sync from. to_token: The point in the stream to sync up to. 
@@ -1056,7 +1078,7 @@ class SlidingSyncHandler: if ( room_sync_config.timeline_limit > 0 # No timeline for invite/knock rooms (just `stripped_state`) - and rooms_membership_for_user_at_to_token.membership + and room_membership_for_user_at_to_token.membership not in (Membership.INVITE, Membership.KNOCK) ): limited = False @@ -1069,12 +1091,12 @@ class SlidingSyncHandler: # We're going to paginate backwards from the `to_token` from_bound = to_token.room_key # People shouldn't see past their leave/ban event - if rooms_membership_for_user_at_to_token.membership in ( + if room_membership_for_user_at_to_token.membership in ( Membership.LEAVE, Membership.BAN, ): from_bound = ( - rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token() + room_membership_for_user_at_to_token.event_pos.to_room_stream_token() ) # Determine whether we should limit the timeline to the token range. @@ -1089,7 +1111,7 @@ class SlidingSyncHandler: to_bound = ( from_token.room_key if from_token is not None - and not rooms_membership_for_user_at_to_token.newly_joined + and not room_membership_for_user_at_to_token.newly_joined else None ) @@ -1126,7 +1148,7 @@ class SlidingSyncHandler: self.storage_controllers, user.to_string(), timeline_events, - is_peeking=rooms_membership_for_user_at_to_token.membership + is_peeking=room_membership_for_user_at_to_token.membership != Membership.JOIN, filter_send_to_client=True, ) @@ -1181,16 +1203,16 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. stripped_state: List[JsonDict] = [] - if rooms_membership_for_user_at_to_token.membership in ( + if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, ): # This should never happen. If someone is invited/knocked on room, then # there should be an event for it. - assert rooms_membership_for_user_at_to_token.event_id is not None + assert room_membership_for_user_at_to_token.event_id is not None invite_or_knock_event = await self.store.get_event( - rooms_membership_for_user_at_to_token.event_id + room_membership_for_user_at_to_token.event_id ) stripped_state = [] @@ -1206,7 +1228,7 @@ class SlidingSyncHandler: stripped_state.append(strip_event(invite_or_knock_event)) # TODO: Handle state resets. For example, if we see - # `rooms_membership_for_user_at_to_token.membership = Membership.LEAVE` but + # `room_membership_for_user_at_to_token.membership = Membership.LEAVE` but # `required_state` doesn't include it, we should indicate to the client that a # state reset happened. Perhaps we should indicate this by setting `initial: # True` and empty `required_state`. @@ -1226,7 +1248,7 @@ class SlidingSyncHandler: # `invite`/`knock` rooms only have `stripped_state`. 
See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 room_state: Optional[StateMap[EventBase]] = None - if rooms_membership_for_user_at_to_token.membership not in ( + if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, ): @@ -1303,7 +1325,7 @@ class SlidingSyncHandler: # initial sync if initial: # People shouldn't see past their leave/ban event - if rooms_membership_for_user_at_to_token.membership in ( + if room_membership_for_user_at_to_token.membership in ( Membership.LEAVE, Membership.BAN, ): @@ -1311,7 +1333,7 @@ class SlidingSyncHandler: room_id, stream_position=to_token.copy_and_replace( StreamKeyType.ROOM, - rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), ), state_filter=state_filter, # Partially-stated rooms should have all state events except for @@ -1341,6 +1363,20 @@ class SlidingSyncHandler: # we can return updates instead of the full required state. raise NotImplementedError() + # Figure out the last bump event in the room + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, to_token.room_key, event_types=DEFAULT_BUMP_EVENT_TYPES + ) + ) + + # By default, just choose the membership event position + bump_stamp = room_membership_for_user_at_to_token.event_pos.stream + # But if we found a bump event, use that instead + if last_bump_event_result is not None: + _, bump_event_pos = last_bump_event_result + bump_stamp = bump_event_pos.stream + return SlidingSyncResult.RoomResult( # TODO: Dummy value name=None, @@ -1358,6 +1394,7 @@ class SlidingSyncHandler: prev_batch=prev_batch_token, limited=limited, num_live=num_live, + bump_stamp=bump_stamp, # TODO: Dummy values joined_count=0, invited_count=0, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 2a22bc14ec..13aed1dc85 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -982,6 +982,7 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms: Dict[str, JsonDict] = {} for room_id, room_result in rooms.items(): serialized_rooms[room_id] = { + "bump_stamp": room_result.bump_stamp, "joined_count": room_result.joined_count, "invited_count": room_result.invited_count, "notification_count": room_result.notification_count, diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index d34376b8df..be81025355 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1178,6 +1178,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self, room_id: str, end_token: RoomStreamToken, + event_types: Optional[Collection[str]] = None, ) -> Optional[Tuple[str, PersistedEventPosition]]: """ Returns the ID and event position of the last event in a room at or before a @@ -1186,6 +1187,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Args: room_id end_token: The token used to stream from + event_types: Optional allowlist of event types to filter by Returns: The ID of the most recent event and it's position, or None if there are no @@ -1207,9 +1209,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): min_stream = end_token.stream max_stream = end_token.get_max_stream_pos() - # We use `union all` because we don't need any of the deduplication logic - # (`union` is really a union + distinct). 
`UNION ALL` does preserve the - # ordering of the operand queries but there is no actual gurantee that it + event_type_clause = "" + event_type_args: List[str] = [] + if event_types is not None and len(event_types) > 0: + event_type_clause, event_type_args = make_in_list_sql_clause( + txn.database_engine, "type", event_types + ) + event_type_clause = f"AND {event_type_clause}" + + # We use `UNION ALL` because we don't need any of the deduplication logic + # (`UNION` is really a `UNION` + `DISTINCT`). `UNION ALL` does preserve the + # ordering of the operand queries but there is no actual guarantee that it # has this behavior in all scenarios so we need the extra `ORDER BY` at the # bottom. sql = """ @@ -1218,6 +1228,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): FROM events LEFT JOIN rejections USING (event_id) WHERE room_id = ? + %s AND ? < stream_ordering AND stream_ordering <= ? AND NOT outlier AND rejections.event_id IS NULL @@ -1229,6 +1240,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): FROM events LEFT JOIN rejections USING (event_id) WHERE room_id = ? + %s AND stream_ordering <= ? AND NOT outlier AND rejections.event_id IS NULL @@ -1236,16 +1248,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): LIMIT 1 ) AS b ORDER BY stream_ordering DESC - """ + """ % ( + event_type_clause, + event_type_clause, + ) txn.execute( sql, - ( - room_id, - min_stream, - max_stream, - room_id, - min_stream, - ), + [room_id] + + event_type_args + + [min_stream, max_stream, room_id] + + event_type_args + + [min_stream], ) for instance_name, stream_ordering, topological_ordering, event_id in txn: diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 3bd3268e59..43dcdf20dd 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -183,6 +183,13 @@ class SlidingSyncResult: events because if a room not in the sliding window bumps into the window because of an @mention it will have `initial: true` yet contain a single live event (with potentially other old events in the timeline). + bump_stamp: The `stream_ordering` of the last event according to the + `bump_event_types`. This helps clients sort more readily without them + needing to pull in a bunch of the timeline to determine the last activity. + `bump_event_types` is a thing because for example, we don't want display + name changes to mark the room as unread and bump it to the top. For + encrypted rooms, we just have to consider any activity as a bump because we + can't see the content and the client has to figure it out for themselves. joined_count: The number of users with membership of join, including the client's own user ID. (same as sync `v2 m.joined_member_count`) invited_count: The number of users with membership of invite. 
(same as sync v2 @@ -211,6 +218,7 @@ class SlidingSyncResult: limited: Optional[bool] # Only optional because it won't be included for invite/knock rooms with `stripped_state` num_live: Optional[int] + bump_stamp: int joined_count: int invited_count: int notification_count: int diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 5f83b637c5..9dd2363adc 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -2844,7 +2844,7 @@ class SortRoomsTestCase(HomeserverTestCase): ) # Sort the rooms (what we're testing) - sorted_room_info = self.get_success( + sorted_sync_rooms = self.get_success( self.sliding_sync_handler.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, @@ -2852,7 +2852,7 @@ class SortRoomsTestCase(HomeserverTestCase): ) self.assertEqual( - [room_id for room_id, _ in sorted_room_info], + [room_membership.room_id for room_membership in sorted_sync_rooms], [room_id2, room_id1], ) @@ -2927,7 +2927,7 @@ class SortRoomsTestCase(HomeserverTestCase): ) # Sort the rooms (what we're testing) - sorted_room_info = self.get_success( + sorted_sync_rooms = self.get_success( self.sliding_sync_handler.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, @@ -2935,7 +2935,7 @@ class SortRoomsTestCase(HomeserverTestCase): ) self.assertEqual( - [room_id for room_id, _ in sorted_room_info], + [room_membership.room_id for room_membership in sorted_sync_rooms], [room_id2, room_id1, room_id3], "Corresponding map to disambiguate the opaque room IDs: " + str( @@ -2946,3 +2946,63 @@ class SortRoomsTestCase(HomeserverTestCase): } ), ) + + def test_default_bump_event_types(self) -> None: + """ + Test that we only consider the *latest* event in the room when sorting (not + `bump_event_types`). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + message_response = self.helper.send(room_id1, "message in room1", tok=user1_tok) + room_id2 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + self.helper.send(room_id2, "message in room2", tok=user1_tok) + + # Send a reaction in room1 which isn't in `DEFAULT_BUMP_EVENT_TYPES` but we only + # care about sorting by the *latest* event in the room. + self.helper.send_event( + room_id1, + type=EventTypes.Reaction, + content={ + "m.relates_to": { + "event_id": message_response["event_id"], + "key": "👍", + "rel_type": "m.annotation", + } + }, + tok=user1_tok, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self.get_success( + self.sliding_sync_handler.get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + ) + + # Sort the rooms (what we're testing) + sorted_sync_rooms = self.get_success( + self.sliding_sync_handler.sort_rooms( + sync_room_map=sync_room_map, + to_token=after_rooms_token, + ) + ) + + self.assertEqual( + [room_membership.room_id for room_membership in sorted_sync_rooms], + # room1 sorts before room2 because it has the latest event (the reaction). + # We only care about the *latest* event in the room. 
+ [room_id1, room_id2], + ) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index cb2888409e..6ff1f03c9a 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -2029,6 +2029,102 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1], ) + def test_rooms_bump_stamp(self) -> None: + """ + Test that `bump_stamp` is present and pointing to relevant events. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + event_response1 = message_response = self.helper.send( + room_id1, "message in room1", tok=user1_tok + ) + event_pos1 = self.get_success( + self.store.get_position_for_event(event_response1["event_id"]) + ) + room_id2 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok) + event_pos2 = self.get_success( + self.store.get_position_for_event(send_response2["event_id"]) + ) + + # Send a reaction in room1 but it shouldn't affect the `bump_stamp` + # because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES` + self.helper.send_event( + room_id1, + type=EventTypes.Reaction, + content={ + "m.relates_to": { + "event_id": message_response["event_id"], + "key": "👍", + "rel_type": "m.annotation", + } + }, + tok=user1_tok, + ) + + # Make the Sliding Sync request + timeline_limit = 100 + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": timeline_limit, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make sure it has the foo-list we requested + self.assertListEqual( + list(channel.json_body["lists"].keys()), + ["foo-list"], + channel.json_body["lists"].keys(), + ) + + # Make sure the list includes the rooms in the right order + self.assertListEqual( + list(channel.json_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 1], + # room1 sorts before room2 because it has the latest event (the + # reaction) + "room_ids": [room_id1, room_id2], + } + ], + channel.json_body["lists"]["foo-list"], + ) + + # The `bump_stamp` for room1 should point at the latest message (not the + # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) + self.assertEqual( + channel.json_body["rooms"][room_id1]["bump_stamp"], + event_pos1.stream, + channel.json_body["rooms"][room_id1], + ) + + # The `bump_stamp` for room2 should point at the latest message + self.assertEqual( + channel.json_body["rooms"][room_id2]["bump_stamp"], + event_pos2.stream, + channel.json_body["rooms"][room_id2], + ) + def test_rooms_newly_joined_incremental_sync(self) -> None: """ Test that when we make an incremental sync with a `newly_joined` `rooms`, we are diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index aad46b1b44..9dea1af8ea 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -556,6 +556,47 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase): ), ) + def test_restrict_event_types(self) -> None: + """ + Test that we only consider given `event_types` when finding the last event + before a token. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response = self.helper.send_event( + room_id1, + type="org.matrix.special_message", + content={"body": "before1, target!"}, + tok=user1_tok, + ) + self.helper.send(room_id1, "before2", tok=user1_tok) + + after_room_token = self.event_sources.get_current_token() + + # Send some events after the token + self.helper.send_event( + room_id1, + type="org.matrix.special_message", + content={"body": "after1"}, + tok=user1_tok, + ) + self.helper.send(room_id1, "after2", tok=user1_tok) + + last_event_result = self.get_success( + self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id=room_id1, + end_token=after_room_token.room_key, + event_types=["org.matrix.special_message"], + ) + ) + assert last_event_result is not None + last_event_id, _ = last_event_result + + # Make sure it's the last event before the token + self.assertEqual(last_event_id, event_response["event_id"]) + class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase): """ From 8cdd2d214e9bbf8995ff4920140804ebcea6497e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Jul 2024 20:30:23 +0100 Subject: [PATCH 010/332] Fix bug in sliding sync when using old DB. (#17398) We don't necessarily have `instance_name` for old events (before we support multiple event persisters). We treat those as if the `instance_name` was "master". --------- Co-authored-by: Eric Eastwood --- changelog.d/17398.bugfix | 1 + synapse/storage/_base.py | 6 - synapse/storage/databases/main/cache.py | 10 -- .../storage/databases/main/events_worker.py | 3 +- synapse/storage/databases/main/roommember.py | 67 +--------- synapse/storage/databases/main/stream.py | 33 +++-- tests/handlers/test_sync.py | 1 - tests/replication/storage/test_events.py | 124 +----------------- 8 files changed, 33 insertions(+), 212 deletions(-) create mode 100644 changelog.d/17398.bugfix diff --git a/changelog.d/17398.bugfix b/changelog.d/17398.bugfix new file mode 100644 index 0000000000..7931c431ef --- /dev/null +++ b/changelog.d/17398.bugfix @@ -0,0 +1 @@ +Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b127289d8d..881888fa93 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -119,9 +119,6 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache( "get_user_in_room_with_profile", (room_id, user_id) ) - self._attempt_to_invalidate_cache( - "get_rooms_for_user_with_stream_ordering", (user_id,) - ) self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,)) # Purge other caches based on room state. 
@@ -148,9 +145,6 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) - self._attempt_to_invalidate_cache( - "get_rooms_for_user_with_stream_ordering", None - ) self._attempt_to_invalidate_cache("get_rooms_for_user", None) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index c6787faea0..2d6b75e47e 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -268,16 +268,12 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] if data.type == EventTypes.Member: - self.get_rooms_for_user_with_stream_ordering.invalidate( # type: ignore[attr-defined] - (data.state_key,) - ) self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined] elif row.type == EventsStreamAllStateRow.TypeId: assert isinstance(data, EventsStreamAllStateRow) # Similar to the above, but the entire caches are invalidated. This is # unfortunate for the membership caches, but should recover quickly. self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] - self.get_rooms_for_user_with_stream_ordering.invalidate_all() # type: ignore[attr-defined] self.get_rooms_for_user.invalidate_all() # type: ignore[attr-defined] else: raise Exception("Unknown events stream row type %s" % (row.type,)) @@ -334,9 +330,6 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_invited_rooms_for_local_user", (state_key,) ) - self._attempt_to_invalidate_cache( - "get_rooms_for_user_with_stream_ordering", (state_key,) - ) self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,)) self._attempt_to_invalidate_cache( @@ -399,9 +392,6 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_id", None) self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None) self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) - self._attempt_to_invalidate_cache( - "get_rooms_for_user_with_stream_ordering", None - ) self._attempt_to_invalidate_cache("get_rooms_for_user", None) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index a5acea8c3b..4d4877c4c3 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1457,7 +1457,8 @@ class EventsWorkerStore(SQLBaseStore): event_dict[event_id] = _EventRow( event_id=event_id, stream_ordering=row[1], - instance_name=row[2], + # If instance_name is null we default to "master" + instance_name=row[2] or "master", internal_metadata=row[3], json=row[4], format_version=row[5], diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index d8b54dc4e3..5d2fd08495 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -50,12 +50,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.cache import 
CacheInvalidationWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine -from synapse.storage.roommember import ( - GetRoomsForUserWithStreamOrdering, - MemberSummary, - ProfileInfo, - RoomsForUser, -) +from synapse.storage.roommember import MemberSummary, ProfileInfo, RoomsForUser from synapse.types import ( JsonDict, PersistedEventPosition, @@ -494,7 +489,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): sender=sender, membership=membership, event_id=event_id, - event_pos=PersistedEventPosition(instance_name, stream_ordering), + event_pos=PersistedEventPosition( + # If instance_name is null we default to "master" + instance_name or "master", + stream_ordering, + ), room_version_id=room_version, ) for room_id, sender, membership, event_id, instance_name, stream_ordering, room_version in txn @@ -606,53 +605,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return results - @cached(max_entries=500000, iterable=True) - async def get_rooms_for_user_with_stream_ordering( - self, user_id: str - ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]: - """Returns a set of room_ids the user is currently joined to. - - If a remote user only returns rooms this server is currently - participating in. - - Args: - user_id - - Returns: - Returns the rooms the user is in currently, along with the stream - ordering of the most recent join for that user and room, along with - the room version of the room. - """ - return await self.db_pool.runInteraction( - "get_rooms_for_user_with_stream_ordering", - self._get_rooms_for_user_with_stream_ordering_txn, - user_id, - ) - - def _get_rooms_for_user_with_stream_ordering_txn( - self, txn: LoggingTransaction, user_id: str - ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]: - # We use `current_state_events` here and not `local_current_membership` - # as a) this gets called with remote users and b) this only gets called - # for rooms the server is participating in. - sql = """ - SELECT room_id, e.instance_name, e.stream_ordering - FROM current_state_events AS c - INNER JOIN events AS e USING (room_id, event_id) - WHERE - c.type = 'm.room.member' - AND c.state_key = ? - AND c.membership = ? - """ - - txn.execute(sql, (user_id, Membership.JOIN)) - return frozenset( - GetRoomsForUserWithStreamOrdering( - room_id, PersistedEventPosition(instance, stream_id) - ) - for room_id, instance, stream_id in txn - ) - async def get_users_server_still_shares_room_with( self, user_ids: Collection[str] ) -> Set[str]: @@ -701,13 +653,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): If a remote user only returns rooms this server is currently participating in. 
""" - rooms = self.get_rooms_for_user_with_stream_ordering.cache.get_immediate( - (user_id,), - None, - update_metrics=False, - ) - if rooms: - return frozenset(r.room_id for r in rooms) room_ids = await self.db_pool.simple_select_onecol( table="current_state_events", diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index be81025355..e74e0d2e91 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -371,7 +371,7 @@ def _make_generic_sql_bound( def _filter_results( lower_token: Optional[RoomStreamToken], upper_token: Optional[RoomStreamToken], - instance_name: str, + instance_name: Optional[str], topological_ordering: int, stream_ordering: int, ) -> bool: @@ -384,8 +384,14 @@ def _filter_results( position maps, which we handle by fetching more than necessary from the DB and then filtering (rather than attempting to construct a complicated SQL query). + + The `instance_name` arg is optional to handle historic rows, and is + interpreted as if it was "master". """ + if instance_name is None: + instance_name = "master" + event_historical_tuple = ( topological_ordering, stream_ordering, @@ -420,7 +426,7 @@ def _filter_results( def _filter_results_by_stream( lower_token: Optional[RoomStreamToken], upper_token: Optional[RoomStreamToken], - instance_name: str, + instance_name: Optional[str], stream_ordering: int, ) -> bool: """ @@ -436,7 +442,14 @@ def _filter_results_by_stream( position maps, which we handle by fetching more than necessary from the DB and then filtering (rather than attempting to construct a complicated SQL query). + + The `instance_name` arg is optional to handle historic rows, and is + interpreted as if it was "master". """ + + if instance_name is None: + instance_name = "master" + if lower_token: assert lower_token.topological is None @@ -912,7 +925,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): prev_sender, ) in txn: assert room_id is not None - assert instance_name is not None assert stream_ordering is not None if _filter_results_by_stream( @@ -936,7 +948,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # Event event_id=event_id, event_pos=PersistedEventPosition( - instance_name=instance_name, + # If instance_name is null we default to "master" + instance_name=instance_name or "master", stream=stream_ordering, ), # When `s.event_id = null`, we won't be able to get respective @@ -952,13 +965,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): prev_event_id=prev_event_id, prev_event_pos=( PersistedEventPosition( - instance_name=prev_instance_name, + # If instance_name is null we default to "master" + instance_name=prev_instance_name or "master", stream=prev_stream_ordering, ) - if ( - prev_instance_name is not None - and prev_stream_ordering is not None - ) + if (prev_stream_ordering is not None) else None ), prev_membership=prev_membership, @@ -1270,7 +1281,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): stream_ordering=stream_ordering, ): return event_id, PersistedEventPosition( - instance_name, stream_ordering + # If instance_name is null we default to "master" + instance_name or "master", + stream_ordering, ) return None diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 674dd4fb54..77aafa492e 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -210,7 +210,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): ) # Blow away caches (supported room versions can only 
change due to a restart). - self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index a56f1e2d5d..1afe523d02 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -30,19 +30,16 @@ from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.handlers.room import RoomEventSource from synapse.server import HomeServer from synapse.storage.databases.main.event_push_actions import ( NotifCounts, RoomNotifCounts, ) from synapse.storage.databases.main.events_worker import EventsWorkerStore -from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser +from synapse.storage.roommember import RoomsForUser from synapse.types import PersistedEventPosition from synapse.util import Clock -from tests.server import FakeTransport - from ._base import BaseWorkerStoreTestCase USER_ID = "@feeling:test" @@ -221,125 +218,6 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): ), ) - def test_get_rooms_for_user_with_stream_ordering(self) -> None: - """Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated - by rows in the events stream - """ - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - self.replicate() - self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) - - j2 = self.persist( - type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" - ) - assert j2.internal_metadata.instance_name is not None - assert j2.internal_metadata.stream_ordering is not None - self.replicate() - - expected_pos = PersistedEventPosition( - j2.internal_metadata.instance_name, j2.internal_metadata.stream_ordering - ) - self.check( - "get_rooms_for_user_with_stream_ordering", - (USER_ID_2,), - {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, - ) - - def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist( - self, - ) -> None: - """Check that current_state invalidation happens correctly with multiple events - in the persistence batch. - - This test attempts to reproduce a race condition between the event persistence - loop and a worker-based Sync handler. - - The problem occurred when the master persisted several events in one batch. It - only updates the current_state at the end of each batch, so the obvious thing - to do is then to issue a current_state_delta stream update corresponding to the - last stream_id in the batch. - - However, that raises the possibility that a worker will see the replication - notification for a join event before the current_state caches are invalidated. - - The test involves: - * creating a join and a message event for a user, and persisting them in the - same batch - - * controlling the replication stream so that updates are sent gradually - - * between each bunch of replication updates, check that we see a consistent - snapshot of the state. 
- """ - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - self.replicate() - self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) - - # limit the replication rate - repl_transport = self._server_transport - assert isinstance(repl_transport, FakeTransport) - repl_transport.autoflush = False - - # build the join and message events and persist them in the same batch. - logger.info("----- build test events ------") - j2, j2ctx = self.build_event( - type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" - ) - msg, msgctx = self.build_event() - self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)])) - self.replicate() - assert j2.internal_metadata.instance_name is not None - assert j2.internal_metadata.stream_ordering is not None - - event_source = RoomEventSource(self.hs) - event_source.store = self.worker_store - current_token = event_source.get_current_key() - - # gradually stream out the replication - while repl_transport.buffer: - logger.info("------ flush ------") - repl_transport.flush(30) - self.pump(0) - - prev_token = current_token - current_token = event_source.get_current_key() - - # attempt to replicate the behaviour of the sync handler. - # - # First, we get a list of the rooms we are joined to - joined_rooms = self.get_success( - self.worker_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) - ) - - # Then, we get a list of the events since the last sync - membership_changes = self.get_success( - self.worker_store.get_membership_changes_for_user( - USER_ID_2, prev_token, current_token - ) - ) - - logger.info( - "%s->%s: joined_rooms=%r membership_changes=%r", - prev_token, - current_token, - joined_rooms, - membership_changes, - ) - - # the membership change is only any use to us if the room is in the - # joined_rooms list. 
- if membership_changes: - expected_pos = PersistedEventPosition( - j2.internal_metadata.instance_name, - j2.internal_metadata.stream_ordering, - ) - self.assertEqual( - joined_rooms, - {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, - ) - event_id = 0 def persist(self, backfill: bool = False, **kwargs: Any) -> EventBase: From b15e17ce6e846a9fc95236f029c31be5d2aeb656 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:31:42 +0100 Subject: [PATCH 011/332] Bump pillow from 10.3.0 to 10.4.0 (#17412) --- poetry.lock | 153 ++++++++++++++++++++++++++++------------------------ 1 file changed, 82 insertions(+), 71 deletions(-) diff --git a/poetry.lock b/poetry.lock index 301205ec83..1425969a1d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1534,84 +1534,95 @@ files = [ [[package]] name = "pillow" -version = "10.3.0" +version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = 
"sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = 
"pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] From 1353fb33470d1799b6b76b4a4ffb18a166809d7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:31:57 +0100 Subject: [PATCH 012/332] Bump ijson from 3.2.3 to 3.3.0 (#17413) --- poetry.lock | 185 +++++++++++++++++++++++++++------------------------- 1 file changed, 95 insertions(+), 90 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1425969a1d..19393bb6b3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -685,100 +685,105 @@ files = [ [[package]] name = "ijson" -version = "3.2.3" +version = "3.3.0" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = "*" files = [ - {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"}, - {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"}, - {file = 
"ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"}, - {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"}, - {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"}, - {file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"}, - {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"}, - {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"}, - {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"}, - {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"}, - {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"}, - {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, - {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, - {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"}, - {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"}, - {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"}, - {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"}, - {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"}, - {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"}, - {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, - {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"}, - {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"}, - {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"}, - {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"}, - {file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"}, - {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"}, - {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"}, - {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash 
= "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"}, - {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"}, - {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"}, - {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"}, - {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"}, - {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"}, - {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"}, - {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"}, - {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"}, - {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"}, - {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"}, - {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"}, - {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"}, - {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"}, - {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"}, - {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"}, - {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"}, - {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"}, + {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675"}, + {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34"}, + {file = "ijson-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e"}, + {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527"}, + {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3"}, + {file = 
"ijson-3.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607"}, + {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a"}, + {file = "ijson-3.3.0-cp310-cp310-win32.whl", hash = "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529"}, + {file = "ijson-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134"}, + {file = "ijson-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af"}, + {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51"}, + {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe"}, + {file = "ijson-3.3.0-cp311-cp311-win32.whl", hash = "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea"}, + {file = "ijson-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42"}, + {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb"}, + {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181"}, + {file = "ijson-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c"}, + {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6"}, + {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182"}, + {file = 
"ijson-3.3.0-cp312-cp312-win32.whl", hash = "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695"}, + {file = "ijson-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd"}, + {file = "ijson-3.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b9d85a02e77ee8ea6d9e3fd5d515bcc3d798d9c1ea54817e5feb97a9bc5d52fe"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6576cdc36d5a09b0c1a3d81e13a45d41a6763188f9eaae2da2839e8a4240bce"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5589225c2da4bb732c9c370c5961c39a6db72cf69fb2a28868a5413ed7f39e6"}, + {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad04cf38164d983e85f9cba2804566c0160b47086dcca4cf059f7e26c5ace8ca"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:a3b730ef664b2ef0e99dec01b6573b9b085c766400af363833e08ebc1e38eb2f"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:4690e3af7b134298055993fcbea161598d23b6d3ede11b12dca6815d82d101d5"}, + {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:aaa6bfc2180c31a45fac35d40e3312a3d09954638ce0b2e9424a88e24d262a13"}, + {file = "ijson-3.3.0-cp36-cp36m-win32.whl", hash = "sha256:44367090a5a876809eb24943f31e470ba372aaa0d7396b92b953dda953a95d14"}, + {file = "ijson-3.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7e2b3e9ca957153557d06c50a26abaf0d0d6c0ddf462271854c968277a6b5372"}, + {file = "ijson-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47c144117e5c0e2babb559bc8f3f76153863b8dd90b2d550c51dab5f4b84a87f"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29ce02af5fbf9ba6abb70765e66930aedf73311c7d840478f1ccecac53fefbf3"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac6c3eeed25e3e2cb9b379b48196413e40ac4e2239d910bb33e4e7f6c137745"}, + {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d92e339c69b585e7b1d857308ad3ca1636b899e4557897ccd91bb9e4a56c965b"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:8c85447569041939111b8c7dbf6f8fa7a0eb5b2c4aebb3c3bec0fb50d7025121"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:542c1e8fddf082159a5d759ee1412c73e944a9a2412077ed00b303ff796907dc"}, + {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:30cfea40936afb33b57d24ceaf60d0a2e3d5c1f2335ba2623f21d560737cc730"}, + {file = "ijson-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:6b661a959226ad0d255e49b77dba1d13782f028589a42dc3172398dd3814c797"}, + {file = "ijson-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0b003501ee0301dbf07d1597482009295e16d647bb177ce52076c2d5e64113e0"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e8d8de44effe2dbd0d8f3eb9840344b2d5b4cc284a14eb8678aec31d1b6bea8"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9cd5c03c63ae06d4f876b9844c5898d0044c7940ff7460db9f4cd984ac7862b5"}, + {file = "ijson-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04366e7e4a4078d410845e58a2987fd9c45e63df70773d7b6e87ceef771b51ee"}, + {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de7c1ddb80fa7a3ab045266dca169004b93f284756ad198306533b792774f10a"}, + {file = 
"ijson-3.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8851584fb931cffc0caa395f6980525fd5116eab8f73ece9d95e6f9c2c326c4c"}, + {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdcfc88347fd981e53c33d832ce4d3e981a0d696b712fbcb45dcc1a43fe65c65"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3917b2b3d0dbbe3296505da52b3cb0befbaf76119b2edaff30bd448af20b5400"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e10c14535abc7ddf3fd024aa36563cd8ab5d2bb6234a5d22c77c30e30fa4fb2b"}, + {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3aba5c4f97f4e2ce854b5591a8b0711ca3b0c64d1b253b04ea7b004b0a197ef6"}, + {file = "ijson-3.3.0-cp38-cp38-win32.whl", hash = "sha256:b325f42e26659df1a0de66fdb5cde8dd48613da9c99c07d04e9fb9e254b7ee1c"}, + {file = "ijson-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:ff835906f84451e143f31c4ce8ad73d83ef4476b944c2a2da91aec8b649570e1"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3c556f5553368dff690c11d0a1fb435d4ff1f84382d904ccc2dc53beb27ba62e"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e4396b55a364a03ff7e71a34828c3ed0c506814dd1f50e16ebed3fc447d5188e"}, + {file = "ijson-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6850ae33529d1e43791b30575070670070d5fe007c37f5d06aebc1dd152ab3f"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36aa56d68ea8def26778eb21576ae13f27b4a47263a7a2581ab2ef58b8de4451"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7ec759c4a0fc820ad5dc6a58e9c391e7b16edcb618056baedbedbb9ea3b1524"}, + {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b51bab2c4e545dde93cb6d6bb34bf63300b7cd06716f195dd92d9255df728331"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:92355f95a0e4da96d4c404aa3cff2ff033f9180a9515f813255e1526551298c1"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8795e88adff5aa3c248c1edce932db003d37a623b5787669ccf205c422b91e4a"}, + {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8f83f553f4cde6d3d4eaf58ec11c939c94a0ec545c5b287461cafb184f4b3a14"}, + {file = "ijson-3.3.0-cp39-cp39-win32.whl", hash = "sha256:ead50635fb56577c07eff3e557dac39533e0fe603000684eea2af3ed1ad8f941"}, + {file = "ijson-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:c8a9befb0c0369f0cf5c1b94178d0d78f66d9cebb9265b36be6e4f66236076b8"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702"}, + {file = "ijson-3.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:25fd49031cdf5fd5f1fd21cb45259a64dad30b67e64f745cc8926af1c8c243d3"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b72178b1e565d06ab19319965022b36ef41bcea7ea153b32ec31194bec032a2"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d0b6b637d05dbdb29d0bfac2ed8425bb369e7af5271b0cc7cf8b801cb7360c2"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5378d0baa59ae422905c5f182ea0fd74fe7e52a23e3821067a7d58c8306b2191"}, + {file = "ijson-3.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:99f5c8ab048ee4233cc4f2b461b205cbe01194f6201018174ac269bf09995749"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:45ff05de889f3dc3d37a59d02096948ce470699f2368b32113954818b21aa74a"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efb521090dd6cefa7aafd120581947b29af1713c902ff54336b7c7130f04c47"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c727691858fd3a1c085d9980d12395517fcbbf02c69fbb22dede8ee03422da"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0420c24e50389bc251b43c8ed379ab3e3ba065ac8262d98beb6735ab14844460"}, + {file = "ijson-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8fdf3721a2aa7d96577970f5604bd81f426969c1822d467f07b3d844fa2fecc7"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:891f95c036df1bc95309951940f8eea8537f102fa65715cdc5aae20b8523813b"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed1336a2a6e5c427f419da0154e775834abcbc8ddd703004108121c6dd9eba9d"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0c819f83e4f7b7f7463b2dc10d626a8be0c85fbc7b3db0edc098c2b16ac968e"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33afc25057377a6a43c892de34d229a86f89ea6c4ca3dd3db0dcd17becae0dbb"}, + {file = "ijson-3.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7914d0cf083471856e9bc2001102a20f08e82311dfc8cf1a91aa422f9414a0d6"}, + {file = "ijson-3.3.0.tar.gz", hash = "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0"}, ] [[package]] From 0ed1c64c835eb0354930d53f8c19a3dbacb68869 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Jul 2024 10:25:44 +0100 Subject: [PATCH 013/332] Fix `/versions` requests (#17410) We need it to work on workers and allow guest access. Broke by #17392 --- changelog.d/17410.misc | 1 + synapse/app/generic_worker.py | 4 ++++ synapse/rest/client/versions.py | 7 ++++++- 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17410.misc diff --git a/changelog.d/17410.misc b/changelog.d/17410.misc new file mode 100644 index 0000000000..76e3976e28 --- /dev/null +++ b/changelog.d/17410.misc @@ -0,0 +1 @@ +Finish up work to allow per-user feature flags. 
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 599f95466b..248622fa92 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -74,6 +74,9 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.experimental_features import ( + ExperimentalFeaturesStore, +) from synapse.storage.databases.main.filtering import FilteringWorkerStore from synapse.storage.databases.main.keys import KeyStore from synapse.storage.databases.main.lock import LockStore @@ -155,6 +158,7 @@ class GenericWorkerStore( LockStore, SessionStore, TaskSchedulerWorkerStore, + ExperimentalFeaturesStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 84cf388bd4..75df684416 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -66,7 +66,12 @@ class VersionsRestServlet(RestServlet): msc3881_enabled = self.config.experimental.msc3881_enabled if self.auth.has_access_token(request): - requester = await self.auth.get_user_by_req(request) + requester = await self.auth.get_user_by_req( + request, + allow_guest=True, + allow_locked=True, + allow_expired=True, + ) user_id = requester.user.to_string() msc3881_enabled = await self.store.is_feature_enabled( From abb1384502f66ddde3fd0db844c4e719b01023ff Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 9 Jul 2024 11:51:51 +0200 Subject: [PATCH 014/332] 1.111.0rc1 --- CHANGES.md | 54 +++++++++++++++++++++++++++++++++++++++ changelog.d/17318.misc | 1 - changelog.d/17320.feature | 1 - changelog.d/17337.feature | 1 - changelog.d/17342.feature | 1 - changelog.d/17356.doc | 1 - changelog.d/17362.bugfix | 1 - changelog.d/17363.misc | 1 - changelog.d/17365.feature | 1 - changelog.d/17367.misc | 1 - changelog.d/17379.doc | 1 - changelog.d/17381.misc | 1 - changelog.d/17388.feature | 3 --- changelog.d/17390.misc | 1 - changelog.d/17392.misc | 1 - changelog.d/17393.misc | 1 - changelog.d/17395.feature | 1 - changelog.d/17398.bugfix | 1 - changelog.d/17399.doc | 1 - changelog.d/17400.feature | 1 - changelog.d/17403.feature | 1 - changelog.d/17406.misc | 1 - changelog.d/17410.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 25 files changed, 61 insertions(+), 25 deletions(-) delete mode 100644 changelog.d/17318.misc delete mode 100644 changelog.d/17320.feature delete mode 100644 changelog.d/17337.feature delete mode 100644 changelog.d/17342.feature delete mode 100644 changelog.d/17356.doc delete mode 100644 changelog.d/17362.bugfix delete mode 100644 changelog.d/17363.misc delete mode 100644 changelog.d/17365.feature delete mode 100644 changelog.d/17367.misc delete mode 100644 changelog.d/17379.doc delete mode 100644 changelog.d/17381.misc delete mode 100644 changelog.d/17388.feature delete mode 100644 changelog.d/17390.misc delete mode 100644 changelog.d/17392.misc delete mode 100644 changelog.d/17393.misc delete mode 100644 changelog.d/17395.feature delete mode 100644 changelog.d/17398.bugfix delete mode 100644 changelog.d/17399.doc delete mode 100644 changelog.d/17400.feature delete mode 100644 changelog.d/17403.feature delete mode 100644 changelog.d/17406.misc delete mode 100644 changelog.d/17410.misc diff --git a/CHANGES.md b/CHANGES.md index a40aa26d78..ba9be2a9a7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 
+1,57 @@ +# Synapse 1.111.0rc1 (2024-07-09) + +### Features + +- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320)) +- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337)) +- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) + by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the + remaining `_matrix/client/v1/media` endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388)) +- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395)) +- Forget all of a user's rooms upon deactivation, enabling future purges. ([\#17400](https://github.com/element-hq/synapse/issues/17400)) +- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403)) +- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406)) + +### Bugfixes + +- Fix rare race which causes no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362)) +- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398)) + +### Improved Documentation + +- Clarify `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356)) +- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379)) +- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399)) + +### Internal Changes + +- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318)) +- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) +- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367)) +- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) +- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390)) +- Finish up work to allow per-user feature flags. 
([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410)) +- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393)) + + + +### Updates to locked dependencies + +* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404)) +* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382)) +* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413)) +* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384)) +* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414)) +* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412)) +* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415)) +* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) +* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409)) +* Bump serde_json from 1.0.117 to 1.0.119. ([\#17385](https://github.com/element-hq/synapse/issues/17385)) +* Bump serde_json from 1.0.119 to 1.0.120. ([\#17408](https://github.com/element-hq/synapse/issues/17408)) +* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380)) + # Synapse 1.110.0 (2024-07-03) No significant changes since 1.110.0rc3. diff --git a/changelog.d/17318.misc b/changelog.d/17318.misc deleted file mode 100644 index b0b21da23b..0000000000 --- a/changelog.d/17318.misc +++ /dev/null @@ -1 +0,0 @@ -Make the release script create a release branch for Complement as well. diff --git a/changelog.d/17320.feature b/changelog.d/17320.feature deleted file mode 100644 index 1e524f3eca..0000000000 --- a/changelog.d/17320.feature +++ /dev/null @@ -1 +0,0 @@ -Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17337.feature b/changelog.d/17337.feature deleted file mode 100644 index bc8f437dbe..0000000000 --- a/changelog.d/17337.feature +++ /dev/null @@ -1 +0,0 @@ -Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17342.feature b/changelog.d/17342.feature deleted file mode 100644 index b2671ea14a..0000000000 --- a/changelog.d/17342.feature +++ /dev/null @@ -1 +0,0 @@ -Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17356.doc b/changelog.d/17356.doc deleted file mode 100644 index b393d8d147..0000000000 --- a/changelog.d/17356.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify `url_preview_url_blacklist` is a usability feature. diff --git a/changelog.d/17362.bugfix b/changelog.d/17362.bugfix deleted file mode 100644 index a91ce9fc06..0000000000 --- a/changelog.d/17362.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare race which causes no new to-device messages to be received from remote server. 
diff --git a/changelog.d/17363.misc b/changelog.d/17363.misc deleted file mode 100644 index 555e2225ba..0000000000 --- a/changelog.d/17363.misc +++ /dev/null @@ -1 +0,0 @@ -Fix uploading packages to PyPi. \ No newline at end of file diff --git a/changelog.d/17365.feature b/changelog.d/17365.feature deleted file mode 100644 index 61acc32f32..0000000000 --- a/changelog.d/17365.feature +++ /dev/null @@ -1 +0,0 @@ -Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint. \ No newline at end of file diff --git a/changelog.d/17367.misc b/changelog.d/17367.misc deleted file mode 100644 index 361731b8ae..0000000000 --- a/changelog.d/17367.misc +++ /dev/null @@ -1 +0,0 @@ -Add CI check for the README. \ No newline at end of file diff --git a/changelog.d/17379.doc b/changelog.d/17379.doc deleted file mode 100644 index 08c2544426..0000000000 --- a/changelog.d/17379.doc +++ /dev/null @@ -1 +0,0 @@ -Fix broken links in README. diff --git a/changelog.d/17381.misc b/changelog.d/17381.misc deleted file mode 100644 index ca9830c136..0000000000 --- a/changelog.d/17381.misc +++ /dev/null @@ -1 +0,0 @@ -Fix linting errors from new `ruff` version. diff --git a/changelog.d/17388.feature b/changelog.d/17388.feature deleted file mode 100644 index f04f49f085..0000000000 --- a/changelog.d/17388.feature +++ /dev/null @@ -1,3 +0,0 @@ -Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) -by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the -remaining `_matrix/client/v1/media` endpoints. \ No newline at end of file diff --git a/changelog.d/17390.misc b/changelog.d/17390.misc deleted file mode 100644 index 6a4e344c5c..0000000000 --- a/changelog.d/17390.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building debian packages on non-clean checkouts. diff --git a/changelog.d/17392.misc b/changelog.d/17392.misc deleted file mode 100644 index 76e3976e28..0000000000 --- a/changelog.d/17392.misc +++ /dev/null @@ -1 +0,0 @@ -Finish up work to allow per-user feature flags. diff --git a/changelog.d/17393.misc b/changelog.d/17393.misc deleted file mode 100644 index e131225276..0000000000 --- a/changelog.d/17393.misc +++ /dev/null @@ -1 +0,0 @@ -Allow enabling sliding sync per-user. diff --git a/changelog.d/17395.feature b/changelog.d/17395.feature deleted file mode 100644 index 0c95b9f4a9..0000000000 --- a/changelog.d/17395.feature +++ /dev/null @@ -1 +0,0 @@ -Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17398.bugfix b/changelog.d/17398.bugfix deleted file mode 100644 index 7931c431ef..0000000000 --- a/changelog.d/17398.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. diff --git a/changelog.d/17399.doc b/changelog.d/17399.doc deleted file mode 100644 index 7a3fcf24c0..0000000000 --- a/changelog.d/17399.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify that changelog content *and file extension* need to match in order for entries to merge. 
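Fittingly, the `17399.doc` fragment removed just above describes the very rule this release commit exercises: fragments fold into a single `CHANGES.md` line only when their content *and* file extension match, which is why `17392.misc` and `17410.misc` (both "Finish up work to allow per-user feature flags.") became one entry carrying two issue links. A toy sketch of that merge rule, for illustration only — this is not Synapse's actual release tooling:

```python
from collections import defaultdict
from typing import Dict, List, Tuple

# Fragment filenames map to their one-line contents, as in changelog.d/.
fragments = {
    "17392.misc": "Finish up work to allow per-user feature flags.",
    "17410.misc": "Finish up work to allow per-user feature flags.",
    "17393.misc": "Allow enabling sliding sync per-user.",
}

# Group by (content, extension): both must match for entries to merge.
merged: Dict[Tuple[str, str], List[str]] = defaultdict(list)
for name, content in fragments.items():
    issue, ext = name.split(".", 1)
    merged[(content, ext)].append(issue)

for (content, _ext), issues in sorted(merged.items()):
    links = ", ".join(
        f"[\\#{n}](https://github.com/element-hq/synapse/issues/{n})"
        for n in sorted(issues)
    )
    print(f"- {content} ({links})")
```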
diff --git a/changelog.d/17400.feature b/changelog.d/17400.feature deleted file mode 100644 index 4dca90890c..0000000000 --- a/changelog.d/17400.feature +++ /dev/null @@ -1 +0,0 @@ -Forget all of a user's rooms upon deactivation, enabling future purges. \ No newline at end of file diff --git a/changelog.d/17403.feature b/changelog.d/17403.feature deleted file mode 100644 index b1868d7608..0000000000 --- a/changelog.d/17403.feature +++ /dev/null @@ -1 +0,0 @@ -Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). \ No newline at end of file diff --git a/changelog.d/17406.misc b/changelog.d/17406.misc deleted file mode 100644 index 83f34cac43..0000000000 --- a/changelog.d/17406.misc +++ /dev/null @@ -1 +0,0 @@ -MSC3861: allow overriding the introspection endpoint. diff --git a/changelog.d/17410.misc b/changelog.d/17410.misc deleted file mode 100644 index 76e3976e28..0000000000 --- a/changelog.d/17410.misc +++ /dev/null @@ -1 +0,0 @@ -Finish up work to allow per-user feature flags. diff --git a/debian/changelog b/debian/changelog index c285cc79eb..5fc9e10a29 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium + + * New synapse release 1.111.0rc1. + + -- Synapse Packaging team Tue, 09 Jul 2024 09:49:25 +0000 + matrix-synapse-py3 (1.110.0) stable; urgency=medium * New Synapse release 1.110.0. diff --git a/pyproject.toml b/pyproject.toml index 2d1481f263..0b1f03bb81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.110.0" +version = "1.111.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 028b103ae0d2ce82ea240b06e890a521ec2d6e6c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Jul 2024 11:04:40 +0100 Subject: [PATCH 015/332] Fix exception when failing to talk to remote server (#17411) Broke in #17381 --- changelog.d/17411.misc | 1 + synapse/federation/sender/per_destination_queue.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17411.misc diff --git a/changelog.d/17411.misc b/changelog.d/17411.misc new file mode 100644 index 0000000000..ca9830c136 --- /dev/null +++ b/changelog.d/17411.misc @@ -0,0 +1 @@ +Fix linting errors from new `ruff` version. 
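The one-line fix below pre-binds `pending_pdus` before the `try` block. Previously the name was bound only inside the `async with _TransactionQueueManager(self) as (pending_pdus, pending_edus)` statement, so if setting up the transaction raised first — for example, when the remote server is unreachable — error handling further down the same function that references `pending_pdus` hit an `UnboundLocalError` that masked the real exception. A minimal self-contained sketch of the failure mode and the fix, using made-up names (`transaction`, `pending`) rather than Synapse code:

```python
from contextlib import contextmanager
from typing import Iterator, List


@contextmanager
def transaction() -> Iterator[List[str]]:
    # Simulates _TransactionQueueManager failing on entry, e.g. because
    # the remote server cannot be reached.
    raise ConnectionError("cannot reach remote server")
    yield []  # unreachable, but makes this a generator


def broken() -> None:
    try:
        with transaction() as pending:
            pass
    except ConnectionError:
        # Raises UnboundLocalError: `pending` was never assigned because
        # the context manager raised before binding it.
        print(f"failed with {len(pending)} events pending")


def fixed() -> None:
    pending: List[str] = []  # pre-bind, mirroring the `pending_pdus` fix
    try:
        with transaction() as pending:  # noqa: F811
            pass
    except ConnectionError:
        print(f"failed with {len(pending)} events pending")


fixed()  # prints "failed with 0 events pending"
```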
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index b435588da0..d097e65ea7 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -322,6 +322,7 @@ class PerDestinationQueue: ) async def _transaction_transmission_loop(self) -> None: + pending_pdus: List[EventBase] = [] try: self.transmission_loop_running = True @@ -341,7 +342,7 @@ class PerDestinationQueue: self._new_data_to_send = False async with _TransactionQueueManager(self) as ( - pending_pdus, + pending_pdus, # noqa: F811 pending_edus, ): if not pending_pdus and not pending_edus: From 8e7da35402d040b522b986df139e9827aae92a20 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 9 Jul 2024 12:04:52 +0200 Subject: [PATCH 016/332] Tweak the changelog for v1.111.0rc1 Co-authored-by: Andrew Morgan --- CHANGES.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ba9be2a9a7..513d748bfb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,23 +5,23 @@ - Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320)) - Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337)) - Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342)) -- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365)) - Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) - by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the - remaining `_matrix/client/v1/media` endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388)) + by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the + remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388)) - Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395)) -- Forget all of a user's rooms upon deactivation, enabling future purges. 
([\#17400](https://github.com/element-hq/synapse/issues/17400)) +- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400)) - Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403)) - [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406)) ### Bugfixes -- Fix rare race which causes no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362)) +- Fix rare race which caused no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362)) - Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398)) ### Improved Documentation -- Clarify `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356)) +- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356)) - Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379)) - Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399)) @@ -48,8 +48,7 @@ * Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415)) * Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) * Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409)) -* Bump serde_json from 1.0.117 to 1.0.119. ([\#17385](https://github.com/element-hq/synapse/issues/17385)) -* Bump serde_json from 1.0.119 to 1.0.120. ([\#17408](https://github.com/element-hq/synapse/issues/17408)) +* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385)), ([\#17408](https://github.com/element-hq/synapse/issues/17408)) * Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380)) # Synapse 1.110.0 (2024-07-03) From d48061b7e6cfd2531847d6b322fc97ce6ba924e8 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 9 Jul 2024 12:26:47 +0200 Subject: [PATCH 017/332] Fix up the changelog --- CHANGES.md | 4 ++-- changelog.d/17411.misc | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/17411.misc diff --git a/CHANGES.md b/CHANGES.md index 513d748bfb..cbd8f1062e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -30,7 +30,7 @@ - Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318)) - Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) - Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367)) -- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) +- Fix linting errors from new `ruff` version. 
([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411)) - Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390)) - Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410)) - Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393)) @@ -48,7 +48,7 @@ * Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415)) * Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) * Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409)) -* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385)), ([\#17408](https://github.com/element-hq/synapse/issues/17408)) +* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408)) * Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380)) # Synapse 1.110.0 (2024-07-03) diff --git a/changelog.d/17411.misc b/changelog.d/17411.misc deleted file mode 100644 index ca9830c136..0000000000 --- a/changelog.d/17411.misc +++ /dev/null @@ -1 +0,0 @@ -Fix linting errors from new `ruff` version. From 4d6f7c0fc9a5407c14a2cabdc7f80c5fd2e7395e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 9 Jul 2024 16:12:11 +0100 Subject: [PATCH 018/332] Route auth'd fed media requests to media repo in Complement tests (#17422) --- changelog.d/17422.misc | 1 + docker/configure_workers_and_start.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/17422.misc diff --git a/changelog.d/17422.misc b/changelog.d/17422.misc new file mode 100644 index 0000000000..1f53e3c579 --- /dev/null +++ b/changelog.d/17422.misc @@ -0,0 +1 @@ +Route authenticated federation media requests to media repository workers in Complement tests. 
\ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index b6690f3404..15d8d7b558 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -126,6 +126,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "^/_synapse/admin/v1/media/.*$", "^/_synapse/admin/v1/quarantine_media/.*$", "^/_matrix/client/v1/media/.*$", + "^/_matrix/federation/v1/media/.*$", ], # The first configured media worker will run the media background jobs "shared_extra_conf": { From 62d8b0361b604979d87f7b27559b9116e95f3da6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 9 Jul 2024 16:41:51 +0100 Subject: [PATCH 019/332] Note the new federated media worker endpoints in the worker docs & upgrade notes (#17421) --- changelog.d/17421.doc | 1 + docs/upgrade.md | 5 +++-- docs/workers.md | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17421.doc diff --git a/changelog.d/17421.doc b/changelog.d/17421.doc new file mode 100644 index 0000000000..e6a495e5d6 --- /dev/null +++ b/changelog.d/17421.doc @@ -0,0 +1 @@ +Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index cf53f56b06..52b1adbe90 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -119,13 +119,14 @@ stacking them up. You can monitor the currently running background updates with # Upgrading to v1.111.0 -## New worker endpoints for authenticated client media +## New worker endpoints for authenticated client and federation media [Media repository workers](./workers.md#synapseappmedia_repository) handling -Media APIs can now handle the following endpoint pattern: +Media APIs can now handle the following endpoint patterns: ``` ^/_matrix/client/v1/media/.*$ +^/_matrix/federation/v1/media/.*$ ``` Please update your reverse proxy configuration. diff --git a/docs/workers.md b/docs/workers.md index 22fde488a9..fbf539fa7e 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -740,6 +740,7 @@ Handles the media repository. It can handle all endpoints starting with: /_matrix/media/ /_matrix/client/v1/media/ + /_matrix/federation/v1/media/ ... and the following regular expressions matching media-specific administration APIs: From 1cf3ff6b40a9f0e72c39e471e921a46f56e4511f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 9 Jul 2024 12:26:45 -0500 Subject: [PATCH 020/332] Add `rooms` `name` and `avatar` to Sliding Sync `/sync` (#17418) Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17418.feature | 1 + synapse/handlers/sliding_sync.py | 149 ++++++++++++++-------- tests/rest/client/test_sync.py | 208 +++++++++++++++++++++++++++++++ 3 files changed, 304 insertions(+), 54 deletions(-) create mode 100644 changelog.d/17418.feature diff --git a/changelog.d/17418.feature b/changelog.d/17418.feature new file mode 100644 index 0000000000..c5e56bc500 --- /dev/null +++ b/changelog.d/17418.feature @@ -0,0 +1 @@ +Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
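The handler changes below resolve `name` from the room's `m.room.name` event and `avatar` from the `url` field of `m.room.avatar`, using the room's current state while the user is in the room, the state as of the leave/ban event for banned users, and the stripped state for invites/knocks. Going by the assertions in this patch's tests, a room entry in the sliding sync response then looks roughly like this sketch (values are the tests' dummy data; fields beyond `name`/`avatar` are abbreviated):

```python
# Illustrative response fragment only — shape abbreviated, values taken
# from this patch's tests.
room_entry = {
    "name": "my super room",           # m.room.name -> content["name"]
    "avatar": "mxc://DUMMY_MEDIA_ID",  # m.room.avatar -> content["url"]
    "initial": True,
    "required_state": [],
    "timeline": [],
    # ... remaining fields (counts, bump_stamp, ...) unchanged by this patch
}
```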
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 8e2f751c02..bb81ca9d97 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -18,6 +18,7 @@ # # import logging +from itertools import chain from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple import attr @@ -464,6 +465,7 @@ class SlidingSyncHandler: membership_state_keys = room_sync_config.required_state_map.get( EventTypes.Member ) + # Also see `StateFilter.must_await_full_state(...)` for comparison lazy_loading = ( membership_state_keys is not None and len(membership_state_keys) == 1 @@ -1202,7 +1204,7 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. - stripped_state: List[JsonDict] = [] + stripped_state: Optional[List[JsonDict]] = None if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, @@ -1239,7 +1241,7 @@ class SlidingSyncHandler: # updates. initial = True - # Fetch the required state for the room + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) # @@ -1247,13 +1249,15 @@ class SlidingSyncHandler: # of membership. Currently, we have to make this optional because # `invite`/`knock` rooms only have `stripped_state`. See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + # + # Calculate the `StateFilter` based on the `required_state` for the room room_state: Optional[StateMap[EventBase]] = None + required_room_state: Optional[StateMap[EventBase]] = None if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, ): - # Calculate the `StateFilter` based on the `required_state` for the room - state_filter: Optional[StateFilter] = StateFilter.none() + required_state_filter = StateFilter.none() # If we have a double wildcard ("*", "*") in the `required_state`, we need # to fetch all state for the room # @@ -1276,7 +1280,7 @@ class SlidingSyncHandler: if StateValues.WILDCARD in room_sync_config.required_state_map.get( StateValues.WILDCARD, set() ): - state_filter = StateFilter.all() + required_state_filter = StateFilter.all() # TODO: `StateFilter` currently doesn't support wildcard event types. 
We're # currently working around this by returning all state to the client but it # would be nice to fetch less from the database and return just what the @@ -1285,7 +1289,7 @@ class SlidingSyncHandler: room_sync_config.required_state_map.get(StateValues.WILDCARD) is not None ): - state_filter = StateFilter.all() + required_state_filter = StateFilter.all() else: required_state_types: List[Tuple[str, Optional[str]]] = [] for ( @@ -1317,51 +1321,88 @@ class SlidingSyncHandler: else: required_state_types.append((state_type, state_key)) - state_filter = StateFilter.from_types(required_state_types) + required_state_filter = StateFilter.from_types(required_state_types) - # We can skip fetching state if we don't need any - if state_filter != StateFilter.none(): - # We can return all of the state that was requested if we're doing an - # initial sync - if initial: - # People shouldn't see past their leave/ban event - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - room_state = await self.storage_controllers.state.get_state_at( - room_id, - stream_position=to_token.copy_and_replace( - StreamKeyType.ROOM, - room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), - ), - state_filter=state_filter, - # Partially-stated rooms should have all state events except for - # the membership events and since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able - # to retrieve everything requested. Plus we don't want to block - # the whole sync waiting for this one room. - await_full_state=False, - ) - # Otherwise, we can get the latest current state in the room - else: - room_state = await self.storage_controllers.state.get_current_state( - room_id, - state_filter, - # Partially-stated rooms should have all state events except for - # the membership events and since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able - # to retrieve everything requested. Plus we don't want to block - # the whole sync waiting for this one room. - await_full_state=False, - ) - # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + # We need this base set of info for the response so let's just fetch it along + # with the `required_state` for the room + META_ROOM_STATE = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + state_filter = StateFilter( + types=StateFilter.from_types( + chain(META_ROOM_STATE, required_state_filter.to_types()) + ).types, + include_others=required_state_filter.include_others, + ) + + # We can return all of the state that was requested if this was the first + # time we've sent the room down this connection. + if initial: + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + room_state = await self.storage_controllers.state.get_state_at( + room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + ), + state_filter=state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. 
When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # Otherwise, we can get the latest current state in the room else: - # TODO: Once we can figure out if we've sent a room down this connection before, - # we can return updates instead of the full required state. - raise NotImplementedError() + room_state = await self.storage_controllers.state.get_current_state( + room_id, + state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + else: + # TODO: Once we can figure out if we've sent a room down this connection before, + # we can return updates instead of the full required state. + raise NotImplementedError() + + if required_state_filter != StateFilter.none(): + required_room_state = required_state_filter.filter_state(room_state) + + # Find the room name and avatar from the state + room_name: Optional[str] = None + room_avatar: Optional[str] = None + if room_state is not None: + name_event = room_state.get((EventTypes.Name, "")) + if name_event is not None: + room_name = name_event.content.get("name") + + avatar_event = room_state.get((EventTypes.RoomAvatar, "")) + if avatar_event is not None: + room_avatar = avatar_event.content.get("url") + elif stripped_state is not None: + for event in stripped_state: + if event["type"] == EventTypes.Name: + room_name = event.get("content", {}).get("name") + elif event["type"] == EventTypes.RoomAvatar: + room_avatar = event.get("content", {}).get("url") + + # Found everything so we can stop looking + if room_name is not None and room_avatar is not None: + break # Figure out the last bump event in the room last_bump_event_result = ( @@ -1378,16 +1419,16 @@ class SlidingSyncHandler: bump_stamp = bump_event_pos.stream return SlidingSyncResult.RoomResult( - # TODO: Dummy value - name=None, - # TODO: Dummy value - avatar=None, + name=room_name, + avatar=room_avatar, # TODO: Dummy value heroes=None, # TODO: Dummy value is_dm=False, initial=initial, - required_state=list(room_state.values()) if room_state else None, + required_state=( + list(required_room_state.values()) if required_room_state else None + ), timeline_events=timeline_events, bundled_aggregations=bundled_aggregations, stripped_state=stripped_state, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 6ff1f03c9a..f7852562b1 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1802,6 +1802,206 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["lists"]["foo-list"], ) + def test_rooms_meta_when_joined(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included + in the response when the user is joined to the room. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Reflect the current state of the room + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + + def test_rooms_meta_when_invited(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included + in the response when the user is invited to the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # This should still reflect the current state of the room even when the user is + # invited. + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super duper room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://UPDATED_DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + + def test_rooms_meta_when_banned(self) -> None: + """ + Test that the `rooms` `name` and `avatar` (soon to test `heroes`) reflect the + state of the room when the user was banned (do not leak current state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Reflect the state of the room at the time of leaving + self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID", + channel.json_body["rooms"][room_id1], + ) + def test_rooms_limited_initial_sync(self) -> None: """ Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit` @@ -2973,6 +3173,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_incremental_sync(self) -> None: """ @@ -3027,6 +3228,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard(self) -> None: """ @@ -3084,6 +3286,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): state_map.values(), exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_event_type(self) -> None: """ @@ -3147,6 +3350,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # events when the `event_type` is a wildcard. 
exact=False, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_state_key(self) -> None: """ @@ -3192,6 +3396,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_lazy_loading_room_members(self) -> None: """ @@ -3247,6 +3452,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: @@ -3329,6 +3535,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_combine_superset(self) -> None: """ @@ -3401,6 +3608,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): }, exact=True, ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_partial_state(self) -> None: """ From 22fbc5be5482beaefc98a16c92f0e5b0dad84151 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Jul 2024 18:29:32 +0100 Subject: [PATCH 021/332] Fix new media APIs when using synapse.app.media_repository (#17420) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17420.bugfix | 1 + synapse/federation/transport/server/__init__.py | 2 +- synapse/rest/__init__.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17420.bugfix diff --git a/changelog.d/17420.bugfix b/changelog.d/17420.bugfix new file mode 100644 index 0000000000..343f9b9331 --- /dev/null +++ b/changelog.d/17420.bugfix @@ -0,0 +1 @@ +Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. 
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 5f997040d0..72599bb204 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -321,7 +321,7 @@ def register_servlets( servletclass == FederationMediaDownloadServlet or servletclass == FederationMediaThumbnailServlet ): - if not hs.config.server.enable_media_repo: + if not hs.config.media.can_load_media_repo: continue servletclass( diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index c94d454a28..1aa9ea3877 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -145,7 +145,7 @@ class ClientRestResource(JsonResource): password_policy.register_servlets(hs, client_resource) knock.register_servlets(hs, client_resource) appservice_ping.register_servlets(hs, client_resource) - if hs.config.server.enable_media_repo: + if hs.config.media.can_load_media_repo: from synapse.rest.client import media media.register_servlets(hs, client_resource) From e0ff850cb7e245e09a3ed7aacf064846732a9a2d Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 10 Jul 2024 10:47:35 +0200 Subject: [PATCH 022/332] 1.111.0rc2 --- CHANGES.md | 17 +++++++++++++++++ changelog.d/17420.bugfix | 1 - changelog.d/17421.doc | 1 - changelog.d/17422.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 24 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/17420.bugfix delete mode 100644 changelog.d/17421.doc delete mode 100644 changelog.d/17422.misc diff --git a/CHANGES.md b/CHANGES.md index cbd8f1062e..8279960b5b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,20 @@ +# Synapse 1.111.0rc2 (2024-07-10) + +### Bugfixes + +- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420)) + +### Improved Documentation + +- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421)) + +### Internal Changes + +- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422)) + + + + # Synapse 1.111.0rc1 (2024-07-09) ### Features diff --git a/changelog.d/17420.bugfix b/changelog.d/17420.bugfix deleted file mode 100644 index 343f9b9331..0000000000 --- a/changelog.d/17420.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. diff --git a/changelog.d/17421.doc b/changelog.d/17421.doc deleted file mode 100644 index e6a495e5d6..0000000000 --- a/changelog.d/17421.doc +++ /dev/null @@ -1 +0,0 @@ -Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). \ No newline at end of file diff --git a/changelog.d/17422.misc b/changelog.d/17422.misc deleted file mode 100644 index 1f53e3c579..0000000000 --- a/changelog.d/17422.misc +++ /dev/null @@ -1 +0,0 @@ -Route authenticated federation media requests to media repository workers in Complement tests. 
\ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 5fc9e10a29..0f3dcc64e6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium + + * New synapse release 1.111.0rc2. + + -- Synapse Packaging team Wed, 10 Jul 2024 08:46:54 +0000 + matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium * New synapse release 1.111.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 0b1f03bb81..41de90f9f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0rc1" +version = "1.111.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 4ca13ce0dd6d1dc931cfde7e06191200ca0ec066 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Jul 2024 11:58:42 +0100 Subject: [PATCH 023/332] Handle to-device extensions to Sliding Sync (#17416) Implements MSC3885 --------- Co-authored-by: Eric Eastwood --- changelog.d/17416.feature | 1 + synapse/handlers/sliding_sync.py | 103 ++++++++++++- synapse/rest/client/sync.py | 17 ++- synapse/types/handlers/__init__.py | 35 ++++- synapse/types/rest/client/__init__.py | 48 ++++++- tests/rest/client/test_sync.py | 200 +++++++++++++++++++++++++- 6 files changed, 392 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17416.feature diff --git a/changelog.d/17416.feature b/changelog.d/17416.feature new file mode 100644 index 0000000000..1d119cf48f --- /dev/null +++ b/changelog.d/17416.feature @@ -0,0 +1 @@ +Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index bb81ca9d97..818b13621c 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -542,11 +542,15 @@ class SlidingSyncHandler: rooms[room_id] = room_sync_result + extensions = await self.get_extensions_response( + sync_config=sync_config, to_token=to_token + ) + return SlidingSyncResult( next_pos=to_token, lists=lists, rooms=rooms, - extensions={}, + extensions=extensions, ) async def get_sync_room_ids_for_user( @@ -1445,3 +1449,100 @@ class SlidingSyncHandler: notification_count=0, highlight_count=0, ) + + async def get_extensions_response( + self, + sync_config: SlidingSyncConfig, + to_token: StreamToken, + ) -> SlidingSyncResult.Extensions: + """Handle extension requests. + + Args: + sync_config: Sync configuration + to_token: The point in the stream to sync up to. + """ + + if sync_config.extensions is None: + return SlidingSyncResult.Extensions() + + to_device_response = None + if sync_config.extensions.to_device: + to_device_response = await self.get_to_device_extensions_response( + sync_config=sync_config, + to_device_request=sync_config.extensions.to_device, + to_token=to_token, + ) + + return SlidingSyncResult.Extensions(to_device=to_device_response) + + async def get_to_device_extensions_response( + self, + sync_config: SlidingSyncConfig, + to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, + to_token: StreamToken, + ) -> SlidingSyncResult.Extensions.ToDeviceExtension: + """Handle to-device extension (MSC3885) + + Args: + sync_config: Sync configuration + to_device_request: The to-device extension from the request + to_token: The point in the stream to sync up to. 
+ """ + + user_id = sync_config.user.to_string() + device_id = sync_config.device_id + + # Check that this request has a valid device ID (not all requests have + # to belong to a device, and so device_id is None), and that the + # extension is enabled. + if device_id is None or not to_device_request.enabled: + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{to_token.to_device_key}", + events=[], + ) + + since_stream_id = 0 + if to_device_request.since is not None: + # We've already validated this is an int. + since_stream_id = int(to_device_request.since) + + if to_token.to_device_key < since_stream_id: + # The since token is ahead of our current token, so we return an + # empty response. + logger.warning( + "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r", + since_stream_id, + to_token.to_device_key, + ) + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=to_device_request.since, + events=[], + ) + + # Delete everything before the given since token, as we know the + # device must have received them. + deleted = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=since_stream_id, + ) + + logger.debug( + "Deleted %d to-device messages up to %d for %s", + deleted, + since_stream_id, + user_id, + ) + + messages, stream_id = await self.store.get_messages_for_device( + user_id=user_id, + device_id=device_id, + from_stream_id=since_stream_id, + to_stream_id=to_token.to_device_key, + limit=min(to_device_request.limit, 100), # Limit to at most 100 events + ) + + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{stream_id}", + events=messages, + ) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 13aed1dc85..94d5faf9f7 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -942,7 +942,9 @@ class SlidingSyncRestServlet(RestServlet): response["rooms"] = await self.encode_rooms( requester, sliding_sync_result.rooms ) - response["extensions"] = {} # TODO: sliding_sync_result.extensions + response["extensions"] = await self.encode_extensions( + requester, sliding_sync_result.extensions + ) return response @@ -1054,6 +1056,19 @@ class SlidingSyncRestServlet(RestServlet): return serialized_rooms + async def encode_extensions( + self, requester: Requester, extensions: SlidingSyncResult.Extensions + ) -> JsonDict: + result = {} + + if extensions.to_device is not None: + result["to_device"] = { + "next_batch": extensions.to_device.next_batch, + "events": extensions.to_device.events, + } + + return result + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: SyncRestServlet(hs).register(http_server) diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 43dcdf20dd..a8a3a8f242 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -18,7 +18,7 @@ # # from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple import attr from typing_extensions import TypedDict @@ -252,10 +252,39 @@ class SlidingSyncResult: count: int ops: List[Operation] + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Extensions: + """Responses for extensions + + Attributes: + to_device: The to-device extension (MSC3885) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class 
ToDeviceExtension: + """The to-device extension (MSC3885) + + Attributes: + next_batch: The to-device stream token the client should use + to get more results + events: A list of to-device messages for the client + """ + + next_batch: str + events: Sequence[JsonMapping] + + def __bool__(self) -> bool: + return bool(self.events) + + to_device: Optional[ToDeviceExtension] = None + + def __bool__(self) -> bool: + return bool(self.to_device) + next_pos: StreamToken lists: Dict[str, SlidingWindowList] rooms: Dict[str, RoomResult] - extensions: JsonMapping + extensions: Extensions def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used @@ -271,5 +300,5 @@ class SlidingSyncResult: next_pos=next_pos, lists={}, rooms={}, - extensions={}, + extensions=SlidingSyncResult.Extensions(), ) diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 55f6b44053..1e8fe76c99 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -276,10 +276,48 @@ class SlidingSyncBody(RequestBodyModel): class RoomSubscription(CommonRoomParameters): pass - class Extension(RequestBodyModel): - enabled: Optional[StrictBool] = False - lists: Optional[List[StrictStr]] = None - rooms: Optional[List[StrictStr]] = None + class Extensions(RequestBodyModel): + """The extensions section of the request. + + Extensions MUST have an `enabled` flag which defaults to `false`. If a client + sends an unknown extension name, the server MUST ignore it (or else backwards + compatibility between clients and servers is broken when a newer client tries to + communicate with an older server). + """ + + class ToDeviceExtension(RequestBodyModel): + """The to-device extension (MSC3885) + + Attributes: + enabled + limit: Maximum number of to-device messages to return + since: The `next_batch` from the previous sync response + """ + + enabled: Optional[StrictBool] = False + limit: StrictInt = 100 + since: Optional[StrictStr] = None + + @validator("since") + def since_token_check( + cls, value: Optional[StrictStr] + ) -> Optional[StrictStr]: + # `since` comes in as an opaque string token but we know that it's just + # an integer representing the position in the device inbox stream. We + # want to pre-validate it to make sure it works fine in downstream code. 
+ if value is None: + return value + + try: + int(value) + except ValueError: + raise ValueError( + "'extensions.to_device.since' is invalid (should look like an int)" + ) + + return value + + to_device: Optional[ToDeviceExtension] = None # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: @@ -287,7 +325,7 @@ class SlidingSyncBody(RequestBodyModel): else: lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type] room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None - extensions: Optional[Dict[StrictStr, Extension]] = None + extensions: Optional[Extensions] = None @validator("lists") def lists_length_check( diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index f7852562b1..304c0d4d3d 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -38,7 +38,16 @@ from synapse.api.constants import ( ) from synapse.events import EventBase from synapse.handlers.sliding_sync import StateValues -from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync +from synapse.rest.client import ( + devices, + knock, + login, + read_marker, + receipts, + room, + sendtodevice, + sync, +) from synapse.server import HomeServer from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID from synapse.util import Clock @@ -47,7 +56,7 @@ from tests import unittest from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) -from tests.server import TimedOutException +from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import mark_event_as_partial_state logger = logging.getLogger(__name__) @@ -3696,3 +3705,190 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ], channel.json_body["lists"]["foo-list"], ) + + +class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): + """Tests for the to-device sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + sendtodevice.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.sync_endpoint = ( + "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + ) + + def _assert_to_device_response( + self, channel: FakeChannel, expected_messages: List[JsonDict] + ) -> str: + """Assert the sliding sync response was successful and has the expected + to-device messages. + + Returns the next_batch token from the to-device section. 
+ """ + self.assertEqual(channel.code, 200, channel.json_body) + extensions = channel.json_body["extensions"] + to_device = extensions["to_device"] + self.assertIsInstance(to_device["next_batch"], str) + self.assertEqual(to_device["events"], expected_messages) + + return to_device["next_batch"] + + def test_no_data(self) -> None: + """Test that enabling to-device extension works, even if there is + no-data + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + + # We expect no to-device messages + self._assert_to_device_response(channel, []) + + def test_data_initial_sync(self) -> None: + """Test that we get to-device messages when we don't specify a since + token""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + # Send the to-device message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_data_incremental_sync(self) -> None: + """Test that we get to-device messages over incremental syncs""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + # No to-device messages yet. + next_batch = self._assert_to_device_response(channel, []) + + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + }, + access_token=user1_tok, + ) + next_batch = self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + # The next sliding sync request should not include the to-device + # message. 
+ channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response(channel, []) + + # An initial sliding sync request should not include the to-device + # message, as it should have been deleted + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self._assert_to_device_response(channel, []) From 3232bc29826d030e62c034ec582c73532bc7e465 Mon Sep 17 00:00:00 2001 From: Will Lewis <1543626+wrjlewis@users.noreply.github.com> Date: Wed, 10 Jul 2024 14:59:24 +0100 Subject: [PATCH 024/332] Upload new logo with white bg and update readme to use it (#17387) --- README.rst | 2 +- changelog.d/17387.doc | 1 + docs/element_logo_white_bg.svg | 94 ++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17387.doc create mode 100644 docs/element_logo_white_bg.svg diff --git a/README.rst b/README.rst index a52e0c193d..d5625afe8f 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7 +.. image:: ./docs/element_logo_white_bg.svg :height: 60px **Element Synapse - Matrix homeserver implementation** diff --git a/changelog.d/17387.doc b/changelog.d/17387.doc new file mode 100644 index 0000000000..82be10f135 --- /dev/null +++ b/changelog.d/17387.doc @@ -0,0 +1 @@ +Update the readme image to have a white background, so that it is readable in dark mode. \ No newline at end of file diff --git a/docs/element_logo_white_bg.svg b/docs/element_logo_white_bg.svg new file mode 100644 index 0000000000..50195ab1c8 --- /dev/null +++ b/docs/element_logo_white_bg.svg @@ -0,0 +1,94 @@ + + + + From 30e14c8510ab7b0b73284ba3f9bfa5970f8fef61 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:54:21 +0100 Subject: [PATCH 025/332] Bump zipp from 3.15.0 to 3.19.1 (#17427) --- poetry.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 19393bb6b3..f578a33ed5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3113,18 +3113,18 @@ docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"] [[package]] name = "zipp" -version = "3.15.0" +version = "3.19.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, + {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, + {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", 
"pytest-flake8", "pytest-mypy (>=0.9.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [[package]] name = "zope-event" From 5871daf87769f44f81c6c6adb78a7aedd6c4389e Mon Sep 17 00:00:00 2001 From: Joe Groocock Date: Thu, 11 Jul 2024 14:56:25 +0200 Subject: [PATCH 026/332] Use consistent casing between FROM and AS (#17431) Signed-off-by: Joe Groocock --- docker/Dockerfile | 4 ++-- docker/Dockerfile-dhvirtualenv | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 1bef8045ca..1da196b12e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11 ### # We hardcode the use of Debian bookworm here because this could change upstream # and other Dockerfiles used for testing are expecting bookworm. -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements # RUN --mount is specific to buildkit and is documented at # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. @@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### ### Stage 1: builder ### -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder # install the OS build deps RUN \ diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index f000144567..9266f134be 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -24,7 +24,7 @@ ARG distro="" # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but # it's not obviously easier to use that than to build our own.) -FROM docker.io/library/${distro} as builder +FROM docker.io/library/${distro} AS builder RUN apt-get update -qq -o Acquire::Languages=none RUN env DEBIAN_FRONTEND=noninteractive apt-get install \ From 342f0c35b727fc807ead71ba2fbeb876cff523a8 Mon Sep 17 00:00:00 2001 From: villepeh <100730729+villepeh@users.noreply.github.com> Date: Thu, 11 Jul 2024 16:02:19 +0300 Subject: [PATCH 027/332] Add Red Hat Enterprise Linux and Rocky Linux installation instructions (#17423) Added RHEL/Rocky install instructions (PyPI). Instructions cover versions 8 and 9 which are the only supported ones - except for RHEL7 which is now on extended life cycle support phase. Large part of the guide is for installing Python 3.11 or 3.12. RHEL8 ships with Python 3.6 and RHEL9 ships with 3.9. Newer Python versions can be installed easily as they don't interfere with OS software that still relies on the default Python version. I was first planning to add prerequisites part to the prerequisites section and then install instructions on the top of the page but that section is for pre-built packages so it just didn't sound right. So I just dumped everything to the PyPI section of the page. But suggestions to change are welcome. I also didn't combine these with Fedora section. I haven't tested those packages on RHEL and Fedora ships with Python 3.12 out-of-box. 
--- changelog.d/17423.doc | 1 + docs/setup/forward_proxy.md | 2 +- docs/setup/installation.md | 57 ++++++++++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17423.doc diff --git a/changelog.d/17423.doc b/changelog.d/17423.doc new file mode 100644 index 0000000000..972bc659e4 --- /dev/null +++ b/changelog.d/17423.doc @@ -0,0 +1 @@ +Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. diff --git a/docs/setup/forward_proxy.md b/docs/setup/forward_proxy.md index 3482691f83..f02c7b5fc5 100644 --- a/docs/setup/forward_proxy.md +++ b/docs/setup/forward_proxy.md @@ -67,7 +67,7 @@ in Synapse can be deactivated. **NOTE**: This has an impact on security and is for testing purposes only! To deactivate the certificate validation, the following setting must be added to -your [homserver.yaml](../usage/configuration/homeserver_sample_config.md). +your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md). ```yaml use_insecure_ssl_client_just_for_testing_do_not_use: true diff --git a/docs/setup/installation.md b/docs/setup/installation.md index ed3e59c470..f538e1498a 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -309,7 +309,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \ libwebp-devel libxml2-devel libxslt-devel libpq-devel \ python3-virtualenv libffi-devel openssl-devel python3-devel \ libicu-devel -sudo dnf groupinstall "Development Tools" +sudo dnf group install "Development Tools" +``` + +##### Red Hat Enterprise Linux / Rocky Linux + +*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.* + +It's recommended to use the latest Python versions. + +RHEL 8 in particular ships with Python 3.6 by default, which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9, which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them. + +Python 3.11 and 3.12 are available for both RHEL 8 and 9. + +These commands should be run as the root user. + +RHEL 8 +```bash +# Enable PowerTools repository +dnf config-manager --set-enabled powertools +``` +RHEL 9 +```bash +# Enable CodeReady Linux Builder repository +crb enable +``` + +Install a new version of Python. You only need one of these: +```bash +# Python 3.11 +dnf install python3.11 python3.11-devel +``` +```bash +# Python 3.12 +dnf install python3.12 python3.12-devel +``` +Finally, install the common prerequisites: +```bash +dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf +dnf group install "Development Tools" +``` +###### Using the venv module instead of the virtualenv command + +It's recommended to use the Python venv module directly rather than the virtualenv command. +* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/). +* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments. + +Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI. + +```bash +mkdir -p ~/synapse +# To use Python 3.11, simply use the command "python3.11" instead.
+python3.12 -m venv ~/synapse/env +source ~/synapse/env/bin/activate +pip install --upgrade pip +pip install --upgrade setuptools +pip install matrix-synapse ``` ##### macOS From 677142b6a965baf36a3cc59c4e997bbd67891d42 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 11 Jul 2024 07:03:13 -0600 Subject: [PATCH 028/332] Fix docs on `record_action` to clarify the actions are applied (#17426) This looks like a copy/paste error: the function doesn't reject anything, but instead allows the action count to go through regardless. The remainder of the function's documentation appears correct. --- changelog.d/17426.misc | 1 + synapse/api/ratelimiting.py | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17426.misc diff --git a/changelog.d/17426.misc b/changelog.d/17426.misc new file mode 100644 index 0000000000..886e5d4389 --- /dev/null +++ b/changelog.d/17426.misc @@ -0,0 +1 @@ +Fix documentation on `RateLimiter#record_action`. \ No newline at end of file diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 26b8711851..b80630c5d3 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -236,9 +236,8 @@ class Ratelimiter: requester: The requester that is doing the action, if any. key: An arbitrary key used to classify an action. Defaults to the requester's user ID. - n_actions: The number of times the user wants to do this action. If the user - cannot do all of the actions, the user's action count is not incremented - at all. + n_actions: The number of times the user performed the action. May be negative + to "refund" the rate limit. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. """ From 606da398fc4c693f2e75b9520264e0fc51d03581 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Jul 2024 16:00:44 +0100 Subject: [PATCH 029/332] Fix filtering room types on remote rooms (#17434) We can only fetch room types for rooms the server is in, so we need to only filter rooms that we're joined to. Also includes a perf fix to bulk fetch room types. --- changelog.d/17434.bugfix | 1 + synapse/handlers/sliding_sync.py | 22 ++++---- synapse/storage/databases/main/state.py | 52 ++++++++++++++++++- tests/handlers/test_sliding_sync.py | 68 +++++++++++++++++++++++++ 4 files changed, 130 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17434.bugfix diff --git a/changelog.d/17434.bugfix b/changelog.d/17434.bugfix new file mode 100644 index 0000000000..c7cce52397 --- /dev/null +++ b/changelog.d/17434.bugfix @@ -0,0 +1 @@ +Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. 
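(Editor's note: the heart of the fix in the hunks below is a membership gate in front of a new bulk room-type lookup. The sketch here distills that logic; `bulk_get_room_type` and `Membership.JOIN` are the real names from this patch, while the wrapper function itself is illustrative only, not the authoritative implementation.)

```python
# Distilled sketch of the fix below. Only rooms the user has *joined* are
# passed to the bulk room-type query, so a remote invite (whose create
# event this server may not have) no longer breaks the room-type filter.
# One bulk query also replaces the previous per-room
# `get_create_event_for_room` calls.
from typing import AbstractSet, Mapping, Optional

from synapse.api.constants import Membership


async def joined_room_types(
    store: "DataStore",  # any store exposing bulk_get_room_type
    room_ids: AbstractSet[str],
    sync_room_map: Mapping[str, "_RoomMembershipForUser"],
) -> Mapping[str, Optional[str]]:
    return await store.bulk_get_room_type(
        {
            room_id
            for room_id in room_ids
            if sync_room_map[room_id].membership == Membership.JOIN
        }
    )
```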
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 818b13621c..8e6c2fb860 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -24,13 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple import attr from immutabledict import immutabledict -from synapse.api.constants import ( - AccountDataTypes, - Direction, - EventContentFields, - EventTypes, - Membership, -) +from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membership from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations @@ -959,11 +953,15 @@ class SlidingSyncHandler: # provided in the list. `None` is a valid type for rooms which do not have a # room type. if filters.room_types is not None or filters.not_room_types is not None: - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - create_event = await self.store.get_create_event_for_room(room_id) - room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + room_to_type = await self.store.bulk_get_room_type( + { + room_id + for room_id in filtered_room_id_set + # We only know the room types for joined rooms + if sync_room_map[room_id].membership == Membership.JOIN + } + ) + for room_id, room_type in room_to_type.items(): if ( filters.room_types is not None and room_type not in filters.room_types diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index b2a67aff89..5188b2f7a4 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -41,7 +41,7 @@ from typing import ( import attr -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase @@ -298,6 +298,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): create_event = await self.get_event(create_id) return create_event + @cached(max_entries=10000) + async def get_room_type(self, room_id: str) -> Optional[str]: + """Get the room type for a given room. The server must be joined to the + given room. + """ + + row = await self.db_pool.simple_select_one( + table="room_stats_state", + keyvalues={"room_id": room_id}, + retcols=("room_type",), + allow_none=True, + desc="get_room_type", + ) + + if row is not None: + return row[0] + + # If we haven't updated `room_stats_state` with the room yet, query the + # create event directly. + create_event = await self.get_create_event_for_room(room_id) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + return room_type + + @cachedList(cached_method_name="get_room_type", list_name="room_ids") + async def bulk_get_room_type( + self, room_ids: Set[str] + ) -> Mapping[str, Optional[str]]: + """Bulk fetch room types for the given rooms, the server must be in all + the rooms given. + """ + + rows = await self.db_pool.simple_select_many_batch( + table="room_stats_state", + column="room_id", + iterable=room_ids, + retcols=("room_id", "room_type"), + desc="bulk_get_room_type", + ) + + # If we haven't updated `room_stats_state` with the room yet, query the + # create events directly. 
This should happen only rarely so we don't + # mind if we do this in a loop. + results = dict(rows) + for room_id in room_ids - results.keys(): + create_event = await self.get_create_event_for_room(room_id) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + results[room_id] = room_type + + return results + @cached(max_entries=100000, iterable=True) async def get_partial_current_state_ids(self, room_id: str) -> StateMap[str]: """Get the current state event ids for a room based on the diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 9dd2363adc..eb4b0a05c7 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -35,6 +35,8 @@ from synapse.api.constants import ( RoomTypes, ) from synapse.api.room_versions import RoomVersions +from synapse.events import make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues from synapse.rest import admin from synapse.rest.client import knock, login, room @@ -2791,6 +2793,72 @@ class FilterRoomsTestCase(HomeserverTestCase): self.assertEqual(filtered_room_map.keys(), {space_room_id}) + def test_filter_room_types_with_invite_remote_room(self) -> None: + """Test that we can apply a room type filter, even if we have an invite + for a remote room. + + This is a regression test. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a fake remote invite and persist it. + invite_room_id = "!some:room" + invite_event = make_event_from_dict( + { + "room_id": invite_room_id, + "sender": "@user:test.serv", + "state_key": user1_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + }, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + self.store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.get_success(persist_controller.persist_event(invite_event, context)) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self.get_success( + self.sliding_sync_handler.get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + ) + + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + room_types=[None, RoomTypes.SPACE], + ), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {room_id, invite_room_id}) + class SortRoomsTestCase(HomeserverTestCase): """ From 5a97bbd8958548f16461cfa3fde201f3e032d6b8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 11 Jul 2024 14:05:38 -0500 Subject: [PATCH 030/332] Add `heroes` and room summary fields to Sliding Sync `/sync` (#17419) Additional room summary fields: `joined_count`, `invited_count` Based on 
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17419.feature | 1 + synapse/handlers/sliding_sync.py | 278 ++++++++++++++++------- synapse/rest/client/sync.py | 32 ++- synapse/types/handlers/__init__.py | 18 +- synapse/types/rest/client/__init__.py | 4 - tests/rest/client/test_sync.py | 304 +++++++++++++++++++++++++- 6 files changed, 528 insertions(+), 109 deletions(-) create mode 100644 changelog.d/17419.feature diff --git a/changelog.d/17419.feature b/changelog.d/17419.feature new file mode 100644 index 0000000000..186a27c470 --- /dev/null +++ b/changelog.d/17419.feature @@ -0,0 +1 @@ +Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 8e6c2fb860..e3230d28e6 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -19,7 +19,7 @@ # import logging from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Final, List, Mapping, Optional, Set, Tuple import attr from immutabledict import immutabledict @@ -28,7 +28,9 @@ from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membe from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import CurrentStateDeltaMembership +from synapse.storage.roommember import MemberSummary from synapse.types import ( JsonDict, PersistedEventPosition, @@ -1043,6 +1045,103 @@ class SlidingSyncHandler: reverse=True, ) + async def get_current_state_ids_at( + self, + room_id: str, + room_membership_for_user_at_to_token: _RoomMembershipForUser, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[str]: + """ + Get current state IDs for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + + room_state_ids: StateMap[str] + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: `get_state_ids_at(...)` doesn't take into account the "current state" + room_state_ids = await self.storage_controllers.state.get_state_ids_at( + room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + ), + state_filter=state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. 
Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # Otherwise, we can get the latest current state in the room + else: + room_state_ids = await self.storage_controllers.state.get_current_state_ids( + room_id, + state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + + return room_state_ids + + async def get_current_state_at( + self, + room_id: str, + room_membership_for_user_at_to_token: _RoomMembershipForUser, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[EventBase]: + """ + Get current state for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + room_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + + event_map = await self.store.get_events(list(room_state_ids.values())) + + state_map = {} + for key, event_id in room_state_ids.items(): + event = event_map.get(event_id) + if event: + state_map[key] = event + + return state_map + async def get_room_sync_data( self, user: UserID, @@ -1074,7 +1173,7 @@ class SlidingSyncHandler: # membership. Currently, we have to make all of these optional because # `invite`/`knock` rooms only have `stripped_state`. See # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - timeline_events: Optional[List[EventBase]] = None + timeline_events: List[EventBase] = [] bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None limited: Optional[bool] = None prev_batch_token: Optional[StreamToken] = None @@ -1206,7 +1305,7 @@ class SlidingSyncHandler: # Figure out any stripped state events for invite/knocks. This allows the # potential joiner to identify the room. - stripped_state: Optional[List[JsonDict]] = None + stripped_state: List[JsonDict] = [] if room_membership_for_user_at_to_token.membership in ( Membership.INVITE, Membership.KNOCK, @@ -1243,6 +1342,44 @@ class SlidingSyncHandler: # updates. 
initial = True + # Check whether the room has a name set + name_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types([(EventTypes.Name, "")]), + to_token=to_token, + ) + name_event_id = name_state_ids.get((EventTypes.Name, "")) + + room_membership_summary: Mapping[str, MemberSummary] + empty_membership_summary = MemberSummary([], 0) + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: Figure out how to get the membership summary for left/banned rooms + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` + + # `heroes` are required if the room name is not set. + # + # Note: When you're the first one on your server to be invited to a new room + # over federation, we only have access to some stripped state in + # `event.unsigned.invite_room_state` which currently doesn't include `heroes`, + # see https://github.com/matrix-org/matrix-spec/issues/380. This means that + # clients won't be able to calculate the room name when necessary and just a + # pitfall we have to deal with until that spec issue is resolved. + hero_user_ids: List[str] = [] + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + if name_event_id is None: + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me=user.to_string() + ) + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) @@ -1253,13 +1390,11 @@ class SlidingSyncHandler: # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 # # Calculate the `StateFilter` based on the `required_state` for the room - room_state: Optional[StateMap[EventBase]] = None - required_room_state: Optional[StateMap[EventBase]] = None + required_state_filter = StateFilter.none() if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, ): - required_state_filter = StateFilter.none() # If we have a double wildcard ("*", "*") in the `required_state`, we need # to fetch all state for the room # @@ -1325,86 +1460,65 @@ class SlidingSyncHandler: required_state_filter = StateFilter.from_types(required_state_types) - # We need this base set of info for the response so let's just fetch it along - # with the `required_state` for the room - META_ROOM_STATE = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + # We need this base set of info for the response so let's just fetch it along + # with the `required_state` for the room + meta_room_state = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + [ + (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids + ] + state_filter = StateFilter.all() + if required_state_filter != StateFilter.all(): state_filter = StateFilter( types=StateFilter.from_types( - chain(META_ROOM_STATE, required_state_filter.to_types()) + chain(meta_room_state, required_state_filter.to_types()) ).types, include_others=required_state_filter.include_others, ) - # We can return all of the state that was requested if this was the first - # time we've sent the room down this connection. 
- if initial: - # People shouldn't see past their leave/ban event - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - room_state = await self.storage_controllers.state.get_state_at( - room_id, - stream_position=to_token.copy_and_replace( - StreamKeyType.ROOM, - room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), - ), - state_filter=state_filter, - # Partially-stated rooms should have all state events except for - # remote membership events. Since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able to - # retrieve everything requested. When we're lazy-loading, if there - # are some remote senders in the timeline, we should also have their - # membership event because we had to auth that timeline event. Plus - # we don't want to block the whole sync waiting for this one room. - await_full_state=False, - ) - # Otherwise, we can get the latest current state in the room - else: - room_state = await self.storage_controllers.state.get_current_state( - room_id, - state_filter, - # Partially-stated rooms should have all state events except for - # remote membership events. Since we've already excluded - # partially-stated rooms unless `required_state` only has - # `["m.room.member", "$LAZY"]` for membership, we should be able to - # retrieve everything requested. When we're lazy-loading, if there - # are some remote senders in the timeline, we should also have their - # membership event because we had to auth that timeline event. Plus - # we don't want to block the whole sync waiting for this one room. - await_full_state=False, - ) - # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` - else: - # TODO: Once we can figure out if we've sent a room down this connection before, - # we can return updates instead of the full required state. - raise NotImplementedError() + # We can return all of the state that was requested if this was the first + # time we've sent the room down this connection. + room_state: StateMap[EventBase] = {} + if initial: + room_state = await self.get_current_state_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + else: + # TODO: Once we can figure out if we've sent a room down this connection before, + # we can return updates instead of the full required state. + raise NotImplementedError() - if required_state_filter != StateFilter.none(): - required_room_state = required_state_filter.filter_state(room_state) + required_room_state: StateMap[EventBase] = {} + if required_state_filter != StateFilter.none(): + required_room_state = required_state_filter.filter_state(room_state) # Find the room name and avatar from the state room_name: Optional[str] = None + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? 
see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + name_event = room_state.get((EventTypes.Name, "")) + if name_event is not None: + room_name = name_event.content.get("name") + room_avatar: Optional[str] = None - if room_state is not None: - name_event = room_state.get((EventTypes.Name, "")) - if name_event is not None: - room_name = name_event.content.get("name") + avatar_event = room_state.get((EventTypes.RoomAvatar, "")) + if avatar_event is not None: + room_avatar = avatar_event.content.get("url") - avatar_event = room_state.get((EventTypes.RoomAvatar, "")) - if avatar_event is not None: - room_avatar = avatar_event.content.get("url") - elif stripped_state is not None: - for event in stripped_state: - if event["type"] == EventTypes.Name: - room_name = event.get("content", {}).get("name") - elif event["type"] == EventTypes.RoomAvatar: - room_avatar = event.get("content", {}).get("url") - - # Found everything so we can stop looking - if room_name is not None and room_avatar is not None: - break + # Assemble heroes: extract the info from the state we just fetched + heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = [] + for hero_user_id in hero_user_ids: + member_event = room_state.get((EventTypes.Member, hero_user_id)) + if member_event is not None: + heroes.append( + SlidingSyncResult.RoomResult.StrippedHero( + user_id=hero_user_id, + display_name=member_event.content.get("displayname"), + avatar_url=member_event.content.get("avatar_url"), + ) + ) # Figure out the last bump event in the room last_bump_event_result = ( @@ -1423,14 +1537,11 @@ class SlidingSyncHandler: return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, - # TODO: Dummy value - heroes=None, + heroes=heroes, # TODO: Dummy value is_dm=False, initial=initial, - required_state=( - list(required_room_state.values()) if required_room_state else None - ), + required_state=list(required_room_state.values()), timeline_events=timeline_events, bundled_aggregations=bundled_aggregations, stripped_state=stripped_state, @@ -1438,9 +1549,12 @@ class SlidingSyncHandler: limited=limited, num_live=num_live, bump_stamp=bump_stamp, - # TODO: Dummy values - joined_count=0, - invited_count=0, + joined_count=room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count, + invited_count=room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count, # TODO: These are just dummy values. We could potentially just remove these # since notifications can only really be done correctly on the client anyway # (encrypted rooms). 
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 94d5faf9f7..1d8cbfdf00 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -997,8 +997,21 @@ class SlidingSyncRestServlet(RestServlet): if room_result.avatar: serialized_rooms[room_id]["avatar"] = room_result.avatar - if room_result.heroes: - serialized_rooms[room_id]["heroes"] = room_result.heroes + if room_result.heroes is not None and len(room_result.heroes) > 0: + serialized_heroes = [] + for hero in room_result.heroes: + serialized_hero = { + "user_id": hero.user_id, + } + if hero.display_name is not None: + # Not a typo, just how "displayname" is spelled in the spec + serialized_hero["displayname"] = hero.display_name + + if hero.avatar_url is not None: + serialized_hero["avatar_url"] = hero.avatar_url + + serialized_heroes.append(serialized_hero) + serialized_rooms[room_id]["heroes"] = serialized_heroes # We should only include the `initial` key if it's `True` to save bandwidth. # The absense of this flag means `False`. @@ -1006,7 +1019,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["initial"] = room_result.initial # This will be omitted for invite/knock rooms with `stripped_state` - if room_result.required_state is not None: + if ( + room_result.required_state is not None + and len(room_result.required_state) > 0 + ): serialized_required_state = ( await self.event_serializer.serialize_events( room_result.required_state, @@ -1017,7 +1033,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["required_state"] = serialized_required_state # This will be omitted for invite/knock rooms with `stripped_state` - if room_result.timeline_events is not None: + if ( + room_result.timeline_events is not None + and len(room_result.timeline_events) > 0 + ): serialized_timeline = await self.event_serializer.serialize_events( room_result.timeline_events, time_now, @@ -1045,7 +1064,10 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["is_dm"] = room_result.is_dm # Stripped state only applies to invite/knock rooms - if room_result.stripped_state is not None: + if ( + room_result.stripped_state is not None + and len(room_result.stripped_state) > 0 + ): # TODO: `knocked_state` but that isn't specced yet. # # TODO: Instead of adding `knocked_state`, it would be good to rename diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index a8a3a8f242..409120470a 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -200,18 +200,24 @@ class SlidingSyncResult: flag set. 
(same as sync v2) """ + @attr.s(slots=True, frozen=True, auto_attribs=True) + class StrippedHero: + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] + name: Optional[str] avatar: Optional[str] - heroes: Optional[List[EventBase]] + heroes: Optional[List[StrippedHero]] is_dm: bool initial: bool - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - required_state: Optional[List[EventBase]] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - timeline_events: Optional[List[EventBase]] + # Should be empty for invite/knock rooms with `stripped_state` + required_state: List[EventBase] + # Should be empty for invite/knock rooms with `stripped_state` + timeline_events: List[EventBase] bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] # Optional because it's only relevant to invite/knock rooms - stripped_state: Optional[List[JsonDict]] + stripped_state: List[JsonDict] # Only optional because it won't be included for invite/knock rooms with `stripped_state` prev_batch: Optional[StreamToken] # Only optional because it won't be included for invite/knock rooms with `stripped_state` diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 1e8fe76c99..dbe37bc712 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -200,9 +200,6 @@ class SlidingSyncBody(RequestBodyModel): } timeline_limit: The maximum number of timeline events to return per response. - include_heroes: Return a stripped variant of membership events (containing - `user_id` and optionally `avatar_url` and `displayname`) for the users used - to calculate the room name. filters: Filters to apply to the list before sorting. """ @@ -270,7 +267,6 @@ class SlidingSyncBody(RequestBodyModel): else: ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type] slow_get_all_rooms: Optional[StrictBool] = False - include_heroes: Optional[StrictBool] = False filters: Optional[Filters] = None class RoomSubscription(CommonRoomParameters): diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 304c0d4d3d..0d0bea538b 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1813,8 +1813,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): def test_rooms_meta_when_joined(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included - in the response when the user is joined to the room. + Test that the `rooms` `name` and `avatar` are included in the response and + reflect the current state of the room when the user is joined to the room. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1866,11 +1866,19 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 0, + ) def test_rooms_meta_when_invited(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) are included - in the response when the user is invited to the room. + Test that the `rooms` `name` and `avatar` are included in the response and + reflect the current state of the room when the user is invited to the room. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1892,7 +1900,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): tok=user2_tok, ) - self.helper.join(room_id1, user1_id, tok=user1_tok) + # User1 is invited to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) # Update the room name after user1 has left self.helper.send_state( @@ -1938,11 +1947,19 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://UPDATED_DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 1, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 1, + ) def test_rooms_meta_when_banned(self) -> None: """ - Test that the `rooms` `name` and `avatar` (soon to test `heroes`) reflect the - state of the room when the user was banned (do not leak current state). + Test that the `rooms` `name` and `avatar` reflect the state of the room when the + user was banned (do not leak current state). """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -2010,6 +2027,273 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "mxc://DUMMY_MEDIA_ID", channel.json_body["rooms"][room_id1], ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + # FIXME: The actual number should be "1" (user2) but we currently don't + # support this for rooms where the user has left/been banned. + 0, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 0, + ) + + def test_rooms_meta_heroes(self) -> None: + """ + Test that the `rooms` `heroes` are included in the response when the room + doesn't have a room name set. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + self.helper.join(room_id1, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + + room_id2 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id2, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Room1 has a name so we shouldn't see any `heroes` which the client would use + # the calculate the room name themselves. 
+ self.assertEqual( + channel.json_body["rooms"][room_id1]["name"], + "my super room", + channel.json_body["rooms"][room_id1], + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("heroes")) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["invited_count"], + 1, + ) + + # Room2 doesn't have a name so we should see `heroes` populated + self.assertIsNone(channel.json_body["rooms"][room_id2].get("name")) + self.assertCountEqual( + [ + hero["user_id"] + for hero in channel.json_body["rooms"][room_id2].get("heroes", []) + ], + # Heroes shouldn't include the user themselves (we shouldn't see user1) + [user2_id, user3_id], + ) + self.assertEqual( + channel.json_body["rooms"][room_id2]["joined_count"], + 2, + ) + self.assertEqual( + channel.json_body["rooms"][room_id2]["invited_count"], + 1, + ) + + # We didn't request any state so we shouldn't see any `required_state` + self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(channel.json_body["rooms"][room_id2].get("required_state")) + + def test_rooms_meta_heroes_max(self) -> None: + """ + Test that the `rooms` `heroes` only includes the first 5 users (not including + yourself). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + user6_id = self.register_user("user6", "pass") + user6_tok = self.login(user6_id, "pass") + user7_id = self.register_user("user7", "pass") + user7_tok = self.login(user7_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room", + }, + ) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + self.helper.join(room_id1, user5_id, tok=user5_tok) + self.helper.join(room_id1, user6_id, tok=user6_tok) + self.helper.join(room_id1, user7_id, tok=user7_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Room2 doesn't have a name so we should see `heroes` populated + self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) + # FIXME: Remove this basic assertion and uncomment the better assertion below + # after https://github.com/element-hq/synapse/pull/17435 merges + self.assertEqual(len(channel.json_body["rooms"][room_id1].get("heroes", [])), 5) + # self.assertCountEqual( + # [ + # hero["user_id"] + # for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + # ], + # # Heroes should be the first 5 users in the room (excluding the user + # # themselves, we shouldn't see `user1`) + # [user2_id, user3_id, user4_id, user5_id, user6_id], + # ) + self.assertEqual( + channel.json_body["rooms"][room_id1]["joined_count"], + 7, + ) + self.assertEqual( + 
channel.json_body["rooms"][room_id1]["invited_count"],
+            0,
+        )
+
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state"))
+
+    def test_rooms_meta_heroes_when_banned(self) -> None:
+        """
+        Test that the `rooms` `heroes` are included in the response when the room
+        doesn't have a room name set, but don't leak information past the user's ban.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        _user5_tok = self.login(user5_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room",
+            },
+        )
+        # User1 joins the room
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # User1 is banned from the room
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # User4 joins the room after user1 is banned
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        # User5 is invited after user1 is banned
+        self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Room1 doesn't have a name so we should see `heroes` populated
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("name"))
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in channel.json_body["rooms"][room_id1].get("heroes", [])
+            ],
+            # Heroes shouldn't include the user themselves (we shouldn't see user1). We
+            # also shouldn't see user4 since they joined after user1 was banned.
+            #
+            # FIXME: The actual result should be `[user2_id, user3_id]` but we currently
+            # don't support this for rooms where the user has left/been banned.
+            [],
+        )
+
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["joined_count"],
+            # FIXME: The actual number should be "1" (user2) but we currently don't
+            # support this for rooms where the user has left/been banned.
+            0,
+        )
+        self.assertEqual(
+            channel.json_body["rooms"][room_id1]["invited_count"],
+            # We shouldn't see user5 since they were invited after user1 was banned.
+            #
+            # FIXME: The actual number should be "1" (user3) but we currently don't
+            # support this for rooms where the user has left/been banned.
+ 0, + ) def test_rooms_limited_initial_sync(self) -> None: """ @@ -3081,11 +3365,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.json_body) # Nothing to see for this banned user in the room in the token range - self.assertEqual( - channel.json_body["rooms"][room_id1]["timeline"], - [], - channel.json_body["rooms"][room_id1]["timeline"], - ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("timeline")) # No events returned in the timeline so nothing is "live" self.assertEqual( channel.json_body["rooms"][room_id1]["num_live"], From fb66e938b26e96384af5a72c71ed7d9dec12f1a2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 11 Jul 2024 18:19:26 -0500 Subject: [PATCH 031/332] Add `is_dm` room field to Sliding Sync `/sync` (#17429) Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17429.feature | 1 + synapse/handlers/sliding_sync.py | 75 ++++++++++++++++++++------------ tests/rest/client/test_sync.py | 23 ++++++++++ 3 files changed, 70 insertions(+), 29 deletions(-) create mode 100644 changelog.d/17429.feature diff --git a/changelog.d/17429.feature b/changelog.d/17429.feature new file mode 100644 index 0000000000..608b75d632 --- /dev/null +++ b/changelog.d/17429.feature @@ -0,0 +1 @@ +Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index e3230d28e6..904787ced3 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -291,6 +291,7 @@ class _RoomMembershipForUser: sender: The person who sent the membership event newly_joined: Whether the user newly joined the room during the given token range + is_dm: Whether this user considers this room as a direct-message (DM) room """ room_id: str @@ -299,6 +300,7 @@ class _RoomMembershipForUser: membership: str sender: Optional[str] newly_joined: bool + is_dm: bool def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": return attr.evolve(self, **kwds) @@ -613,6 +615,7 @@ class SlidingSyncHandler: membership=room_for_user.membership, sender=room_for_user.sender, newly_joined=False, + is_dm=False, ) for room_for_user in room_for_user_list } @@ -652,6 +655,7 @@ class SlidingSyncHandler: # - 1c) Update room membership events to the point in time of the `to_token` # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`) # - 3) Figure out which rooms are `newly_joined` + # - 4) Figure out which rooms are DM's # 1) ----------------------------------------------------- @@ -714,6 +718,7 @@ class SlidingSyncHandler: membership=first_membership_change_after_to_token.prev_membership, sender=first_membership_change_after_to_token.prev_sender, newly_joined=False, + is_dm=False, ) else: # If we can't find the previous membership event, we shouldn't @@ -809,6 +814,7 @@ class SlidingSyncHandler: membership=last_membership_change_in_from_to_range.membership, sender=last_membership_change_in_from_to_range.sender, newly_joined=False, + is_dm=False, ) # 3) Figure out `newly_joined` @@ -846,6 +852,35 @@ class SlidingSyncHandler: room_id ].copy_and_replace(newly_joined=True) + # 4) Figure out which rooms the user considers to be direct-message (DM) rooms + # + # We're using global account data (`m.direct`) instead of checking for + # `is_direct` on membership events because that property only appears for + # the 
invitee membership event (doesn't show up for the inviter). + # + # We're unable to take `to_token` into account for global account data since + # we only keep track of the latest account data for the user. + dm_map = await self.store.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.DIRECT + ) + + # Flatten out the map. Account data is set by the client so it needs to be + # scrutinized. + dm_room_id_set = set() + if isinstance(dm_map, dict): + for room_ids in dm_map.values(): + # Account data should be a list of room IDs. Ignore anything else + if isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + dm_room_id_set.add(room_id) + + # 4) Fixup + for room_id in filtered_sync_room_id_set: + filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + room_id + ].copy_and_replace(is_dm=room_id in dm_room_id_set) + return filtered_sync_room_id_set async def filter_rooms( @@ -869,41 +904,24 @@ class SlidingSyncHandler: A filtered dictionary of room IDs along with membership information in the room at the time of `to_token`. """ - user_id = user.to_string() - - # TODO: Apply filters - filtered_room_id_set = set(sync_room_map.keys()) # Filter for Direct-Message (DM) rooms if filters.is_dm is not None: - # We're using global account data (`m.direct`) instead of checking for - # `is_direct` on membership events because that property only appears for - # the invitee membership event (doesn't show up for the inviter). Account - # data is set by the client so it needs to be scrutinized. - # - # We're unable to take `to_token` into account for global account data since - # we only keep track of the latest account data for the user. - dm_map = await self.store.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.DIRECT - ) - - # Flatten out the map - dm_room_id_set = set() - if isinstance(dm_map, dict): - for room_ids in dm_map.values(): - # Account data should be a list of room IDs. 
Ignore anything else - if isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - dm_room_id_set.add(room_id) - if filters.is_dm: # Only DM rooms please - filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if sync_room_map[room_id].is_dm + } else: # Only non-DM rooms please - filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if not sync_room_map[room_id].is_dm + } if filters.spaces: raise NotImplementedError() @@ -1538,8 +1556,7 @@ class SlidingSyncHandler: name=room_name, avatar=room_avatar, heroes=heroes, - # TODO: Dummy value - is_dm=False, + is_dm=room_membership_for_user_at_to_token.is_dm, initial=initial, required_state=list(required_room_state.values()), timeline_events=timeline_events, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 0d0bea538b..4236812db5 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -1662,6 +1662,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): list(channel.json_body["lists"]["room-invites"]), ) + # Ensure DM's are correctly marked + self.assertDictEqual( + { + room_id: room.get("is_dm") + for room_id, room in channel.json_body["rooms"].items() + }, + { + invite_room_id: None, + room_id: None, + invited_dm_room_id: True, + joined_dm_room_id: True, + }, + ) + def test_sort_list(self) -> None: """ Test that the `lists` are sorted by `stream_ordering` @@ -1874,6 +1888,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 0, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_when_invited(self) -> None: """ @@ -1955,6 +1972,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 1, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_when_banned(self) -> None: """ @@ -2037,6 +2057,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["rooms"][room_id1]["invited_count"], 0, ) + self.assertIsNone( + channel.json_body["rooms"][room_id1].get("is_dm"), + ) def test_rooms_meta_heroes(self) -> None: """ From ab62aa09da4a3c4444d80a9d3a899c685f1bb798 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Jul 2024 04:37:10 -0500 Subject: [PATCH 032/332] Add room subscriptions to Sliding Sync `/sync` (#17432) Add room subscriptions to Sliding Sync `/sync` Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync Currently, you can only subscribe to rooms you have had *any* membership in before. In the future, we will allow `world_readable` rooms to be subscribed to without joining. 
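As a rough sketch (the request shape here is illustrative; the room ID is a
placeholder and the `required_state`/`timeline_limit` values are arbitrary), a
request that pins a single room via `room_subscriptions` looks like the bodies
used in the tests in this series:

```python
# Sketch of a Sliding Sync request body using `room_subscriptions`; assumes
# the requesting user has (or has had) membership in the subscribed room.
sync_body = {
    # No sliding windows are needed just to subscribe to a room.
    "lists": {},
    "room_subscriptions": {
        "!example:test": {  # placeholder room ID
            # [event_type, state_key] pairs to include for the room
            "required_state": [["m.room.name", ""]],
            "timeline_limit": 1,
        }
    },
}
```

If the same room also appears in a sliding window list, the handler takes the
superset of the two room configs, so a subscription is additive rather than
overriding.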
--- changelog.d/17432.feature | 1 + synapse/handlers/sliding_sync.py | 403 ++++++++--- tests/handlers/test_sliding_sync.py | 1045 ++++++++++++++++++++++----- tests/rest/client/test_sync.py | 347 +++++++-- tests/unittest.py | 51 ++ 5 files changed, 1489 insertions(+), 358 deletions(-) create mode 100644 changelog.d/17432.feature diff --git a/changelog.d/17432.feature b/changelog.d/17432.feature new file mode 100644 index 0000000000..c86f04c118 --- /dev/null +++ b/changelog.d/17432.feature @@ -0,0 +1 @@ +Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 904787ced3..be98b379eb 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -62,32 +62,79 @@ DEFAULT_BUMP_EVENT_TYPES = { } +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _RoomMembershipForUser: + """ + Attributes: + room_id: The room ID of the membership event + event_id: The event ID of the membership event + event_pos: The stream position of the membership event + membership: The membership state of the user in the room + sender: The person who sent the membership event + newly_joined: Whether the user newly joined the room during the given token + range and is still joined to the room at the end of this range. + newly_left: Whether the user newly left (or kicked) the room during the given + token range and is still "leave" at the end of this range. + is_dm: Whether this user considers this room as a direct-message (DM) room + """ + + room_id: str + # Optional because state resets can affect room membership without a corresponding event. + event_id: Optional[str] + # Even during a state reset which removes the user from the room, we expect this to + # be set because `current_state_delta_stream` will note the position that the reset + # happened. + event_pos: PersistedEventPosition + # Even during a state reset which removes the user from the room, we expect this to + # be set to `LEAVE` because we can make that assumption based on the situaton (see + # `get_current_state_delta_membership_changes_for_user(...)`) + membership: str + # Optional because state resets can affect room membership without a corresponding event. + sender: Optional[str] + newly_joined: bool + newly_left: bool + is_dm: bool + + def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": + return attr.evolve(self, **kwds) + + def filter_membership_for_sync( - *, membership: str, user_id: str, sender: Optional[str] + *, user_id: str, room_membership_for_user: _RoomMembershipForUser ) -> bool: """ Returns True if the membership event should be included in the sync response, otherwise False. Attributes: - membership: The membership state of the user in the room. user_id: The user ID that the membership applies to - sender: The person who sent the membership event + room_membership_for_user: Membership information for the user in the room """ - # Everything except `Membership.LEAVE` because we want everything that's *still* - # relevant to the user. There are few more things to include in the sync response - # (newly_left) but those are handled separately. 
+ membership = room_membership_for_user.membership + sender = room_membership_for_user.sender + newly_left = room_membership_for_user.newly_left + + # We want to allow everything except rooms the user has left unless `newly_left` + # because we want everything that's *still* relevant to the user. We include + # `newly_left` rooms because the last event that the user should see is their own + # leave event. # - # This logic includes kicks (leave events where the sender is not the same user) and - # can be read as "anything that isn't a leave or a leave with a different sender". + # A leave != kick. This logic includes kicks (leave events where the sender is not + # the same user). # - # When `sender=None` and `membership=Membership.LEAVE`, it means that a state reset - # happened that removed the user from the room, or the user was the last person - # locally to leave the room which caused the server to leave the room. In both - # cases, we can just remove the rooms since they are no longer relevant to the user. - # They could still be added back later if they are `newly_left`. - return membership != Membership.LEAVE or sender not in (user_id, None) + # When `sender=None`, it means that a state reset happened that removed the user + # from the room without a corresponding leave event. We can just remove the rooms + # since they are no longer relevant to the user but will still appear if they are + # `newly_left`. + return ( + # Anything except leave events + membership != Membership.LEAVE + # Unless... + or newly_left + # Allow kicks + or (membership == Membership.LEAVE and sender not in (user_id, None)) + ) # We can't freeze this class because we want to update it in place with the @@ -281,31 +328,6 @@ class StateValues: LAZY: Final = "$LAZY" -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _RoomMembershipForUser: - """ - Attributes: - event_id: The event ID of the membership event - event_pos: The stream position of the membership event - membership: The membership state of the user in the room - sender: The person who sent the membership event - newly_joined: Whether the user newly joined the room during the given token - range - is_dm: Whether this user considers this room as a direct-message (DM) room - """ - - room_id: str - event_id: Optional[str] - event_pos: PersistedEventPosition - membership: str - sender: Optional[str] - newly_joined: bool - is_dm: bool - - def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": - return attr.evolve(self, **kwds) - - class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @@ -424,18 +446,31 @@ class SlidingSyncHandler: # See https://github.com/matrix-org/matrix-doc/issues/1144 raise NotImplementedError() + # Get all of the room IDs that the user should be able to see in the sync + # response + has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 + has_room_subscriptions = ( + sync_config.room_subscriptions is not None + and len(sync_config.room_subscriptions) > 0 + ) + if has_lists or has_room_subscriptions: + room_membership_for_user_map = ( + await self.get_room_membership_for_user_at_to_token( + user=sync_config.user, + to_token=to_token, + from_token=from_token, + ) + ) + # Assemble sliding window lists lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we're going to display and need to fetch more # info about relevant_room_map: Dict[str, RoomSyncConfig] = {} - if sync_config.lists: - # Get all of the room IDs that the user 
should be able to see in the sync - # response - sync_room_map = await self.get_sync_room_ids_for_user( - sync_config.user, - from_token=from_token, - to_token=to_token, + if has_lists and sync_config.lists is not None: + sync_room_map = await self.filter_rooms_relevant_for_sync( + user=sync_config.user, + room_membership_for_user_map=room_membership_for_user_map, ) for list_key, list_config in sync_config.lists.items(): @@ -524,7 +559,35 @@ class SlidingSyncHandler: ops=ops, ) - # TODO: if (sync_config.room_subscriptions): + # Handle room subscriptions + if has_room_subscriptions and sync_config.room_subscriptions is not None: + for room_id, room_subscription in sync_config.room_subscriptions.items(): + room_membership_for_user_at_to_token = ( + await self.check_room_subscription_allowed_for_user( + room_id=room_id, + room_membership_for_user_map=room_membership_for_user_map, + to_token=to_token, + ) + ) + + # Skip this room if the user isn't allowed to see it + if not room_membership_for_user_at_to_token: + continue + + room_membership_for_user_map[room_id] = ( + room_membership_for_user_at_to_token + ) + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. + room_sync_config = RoomSyncConfig.from_room_config(room_subscription) + existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config(room_sync_config) + else: + relevant_room_map[room_id] = room_sync_config # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} @@ -533,7 +596,9 @@ class SlidingSyncHandler: user=sync_config.user, room_id=room_id, room_sync_config=room_sync_config, - room_membership_for_user_at_to_token=sync_room_map[room_id], + room_membership_for_user_at_to_token=room_membership_for_user_map[ + room_id + ], from_token=from_token, to_token=to_token, ) @@ -551,28 +616,23 @@ class SlidingSyncHandler: extensions=extensions, ) - async def get_sync_room_ids_for_user( + async def get_room_membership_for_user_at_to_token( self, user: UserID, to_token: StreamToken, - from_token: Optional[StreamToken] = None, + from_token: Optional[StreamToken], ) -> Dict[str, _RoomMembershipForUser]: """ - Fetch room IDs that should be listed for this user in the sync response (the - full room list that will be filtered, sorted, and sliced). + Fetch room IDs that the user has had membership in (the full room list including + long-lost left rooms that will be filtered, sorted, and sliced). - We're looking for rooms where the user has the following state in the token - range (> `from_token` and <= `to_token`): + We're looking for rooms where the user has had any sort of membership in the + token range (> `from_token` and <= `to_token`) - - `invite`, `join`, `knock`, `ban` membership events - - Kicks (`leave` membership events where `sender` is different from the - `user_id`/`state_key`) - - `newly_left` (rooms that were left during the given token range) - - In order for bans/kicks to not show up in sync, you need to `/forget` those - rooms. This doesn't modify the event itself though and only adds the - `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way - to tell when a room was forgotten at the moment so we can't factor it into the - from/to range. + In order for bans/kicks to not show up, you need to `/forget` those rooms. 
This + doesn't modify the event itself though and only adds the `forgotten` flag to the + `room_memberships` table in Synapse. There isn't a way to tell when a room was + forgotten at the moment so we can't factor it into the token range. Args: user: User to fetch rooms for @@ -580,8 +640,8 @@ class SlidingSyncHandler: from_token: The point in the stream to sync from. Returns: - A dictionary of room IDs that should be listed in the sync response along - with membership information in that room at the time of `to_token`. + A dictionary of room IDs that the user has had membership in along with + membership information in that room at the time of `to_token`. """ user_id = user.to_string() @@ -592,9 +652,6 @@ class SlidingSyncHandler: # We want to fetch any kind of membership (joined and left rooms) in order # to get the `event_pos` of the latest room membership event for the # user. - # - # We will filter out the rooms that don't belong below (see - # `filter_membership_for_sync`) membership_list=Membership.LIST, excluded_rooms=self.rooms_to_exclude_globally, ) @@ -614,7 +671,9 @@ class SlidingSyncHandler: event_pos=room_for_user.event_pos, membership=room_for_user.membership, sender=room_for_user.sender, + # We will update these fields below to be accurate newly_joined=False, + newly_left=False, is_dm=False, ) for room_for_user in room_for_user_list @@ -653,12 +712,10 @@ class SlidingSyncHandler: # - 1a) Remove rooms that the user joined after the `to_token` # - 1b) Add back rooms that the user left after the `to_token` # - 1c) Update room membership events to the point in time of the `to_token` - # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`) - # - 3) Figure out which rooms are `newly_joined` + # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`) + # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`) # - 4) Figure out which rooms are DM's - # 1) ----------------------------------------------------- - # 1) Fetch membership changes that fall in the range from `to_token` up to # `membership_snapshot_token` # @@ -717,7 +774,9 @@ class SlidingSyncHandler: event_pos=first_membership_change_after_to_token.prev_event_pos, membership=first_membership_change_after_to_token.prev_membership, sender=first_membership_change_after_to_token.prev_sender, + # We will update these fields below to be accurate newly_joined=False, + newly_left=False, is_dm=False, ) else: @@ -726,22 +785,6 @@ class SlidingSyncHandler: # exact membership state and shouldn't rely on the current snapshot. 
sync_room_id_set.pop(room_id, None)
 
-        # Filter the rooms that that we have updated room membership events to the point
-        # in time of the `to_token` (from the "1)" fixups)
-        filtered_sync_room_id_set = {
-            room_id: room_membership_for_user
-            for room_id, room_membership_for_user in sync_room_id_set.items()
-            if filter_membership_for_sync(
-                membership=room_membership_for_user.membership,
-                user_id=user_id,
-                sender=room_membership_for_user.sender,
-            )
-        }
-
-        # 2) -----------------------------------------------------
-        # We fix-up newly_left rooms after the first fixup because it may have removed
-        # some left rooms that we can figure out are newly_left in the following code
-
         # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
         current_state_delta_membership_changes_in_from_to_range = []
         if from_token:
@@ -803,19 +846,40 @@
             if last_membership_change_in_from_to_range.membership == Membership.JOIN:
                 possibly_newly_joined_room_ids.add(room_id)
 
-            # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
-            # include newly_left rooms because the last event that the user should see
-            # is their own leave event
+            # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`).
             if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
-                filtered_sync_room_id_set[room_id] = _RoomMembershipForUser(
-                    room_id=room_id,
-                    event_id=last_membership_change_in_from_to_range.event_id,
-                    event_pos=last_membership_change_in_from_to_range.event_pos,
-                    membership=last_membership_change_in_from_to_range.membership,
-                    sender=last_membership_change_in_from_to_range.sender,
-                    newly_joined=False,
-                    is_dm=False,
-                )
+                # 2) Mark this room as `newly_left`
+
+                # If we're seeing a membership change here, we should expect to already
+                # have it in our snapshot, but if a state reset happens, it wouldn't
+                # have shown up in our snapshot and would only appear as a change here.
+                existing_sync_entry = sync_room_id_set.get(room_id)
+                if existing_sync_entry is not None:
+                    # Normal expected case
+                    sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace(
+                        newly_left=True
+                    )
+                else:
+                    # State reset!
+                    logger.warning(
+                        "State reset detected for room_id %s with %s who is no longer in the room",
+                        room_id,
+                        user_id,
+                    )
+                    # Even though a state reset happened which removed the person from
+                    # the room, we still add it to the list so the user knows they left
+                    # the room. Downstream code can check for a state reset by looking
+                    # for `event_id=None and membership is not None`.
+                    sync_room_id_set[room_id] = _RoomMembershipForUser(
+                        room_id=room_id,
+                        event_id=last_membership_change_in_from_to_range.event_id,
+                        event_pos=last_membership_change_in_from_to_range.event_pos,
+                        membership=last_membership_change_in_from_to_range.membership,
+                        sender=last_membership_change_in_from_to_range.sender,
+                        newly_joined=False,
+                        newly_left=True,
+                        is_dm=False,
+                    )
 
         # 3) Figure out `newly_joined`
         for room_id in possibly_newly_joined_room_ids:
@@ -826,9 +890,9 @@
             # also some non-join in the range, we know they `newly_joined`.
if has_non_join_in_from_to_range: # We found a `newly_joined` room (we left and joined within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) + sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( + newly_joined=True + ) else: prev_event_id = first_membership_change_by_room_id_in_from_to_range[ room_id @@ -840,7 +904,7 @@ class SlidingSyncHandler: if prev_event_id is None: # We found a `newly_joined` room (we are joining the room for the # first time within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + sync_room_id_set[room_id] = sync_room_id_set[ room_id ].copy_and_replace(newly_joined=True) # Last resort, we need to step back to the previous membership event @@ -848,7 +912,7 @@ class SlidingSyncHandler: elif prev_membership != Membership.JOIN: # We found a `newly_joined` room (we left before the token range # and joined within the token range) - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ + sync_room_id_set[room_id] = sync_room_id_set[ room_id ].copy_and_replace(newly_joined=True) @@ -876,12 +940,122 @@ class SlidingSyncHandler: dm_room_id_set.add(room_id) # 4) Fixup - for room_id in filtered_sync_room_id_set: - filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[ - room_id - ].copy_and_replace(is_dm=room_id in dm_room_id_set) + for room_id in sync_room_id_set: + sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( + is_dm=room_id in dm_room_id_set + ) - return filtered_sync_room_id_set + return sync_room_id_set + + async def filter_rooms_relevant_for_sync( + self, + user: UserID, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Filter room IDs that should/can be listed for this user in the sync response (the + full room list that will be further filtered, sorted, and sliced). + + We're looking for rooms where the user has the following state in the token + range (> `from_token` and <= `to_token`): + + - `invite`, `join`, `knock`, `ban` membership events + - Kicks (`leave` membership events where `sender` is different from the + `user_id`/`state_key`) + - `newly_left` (rooms that were left during the given token range) + - In order for bans/kicks to not show up in sync, you need to `/forget` those + rooms. This doesn't modify the event itself though and only adds the + `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way + to tell when a room was forgotten at the moment so we can't factor it into the + from/to range. + + Args: + user: User that is syncing + room_membership_for_user_map: Room membership for the user + + Returns: + A dictionary of room IDs that should be listed in the sync response along + with membership information in that room at the time of `to_token`. 
+ """ + user_id = user.to_string() + + # Filter rooms to only what we're interested to sync with + filtered_sync_room_map = { + room_id: room_membership_for_user + for room_id, room_membership_for_user in room_membership_for_user_map.items() + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_membership_for_user, + ) + } + + return filtered_sync_room_map + + async def check_room_subscription_allowed_for_user( + self, + room_id: str, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + to_token: StreamToken, + ) -> Optional[_RoomMembershipForUser]: + """ + Check whether the user is allowed to see the room based on whether they have + ever had membership in the room or if the room is `world_readable`. + + Similar to `check_user_in_room_or_world_readable(...)` + + Args: + room_id: Room to check + room_membership_for_user_map: Room membership for the user at the time of + the `to_token` (<= `to_token`). + to_token: The token to fetch rooms up to. + + Returns: + The room membership for the user if they are allowed to subscribe to the + room else `None`. + """ + + # We can first check if they are already allowed to see the room based + # on our previous work to assemble the `room_membership_for_user_map`. + # + # If they have had any membership in the room over time (up to the `to_token`), + # let them subscribe and see what they can. + existing_membership_for_user = room_membership_for_user_map.get(room_id) + if existing_membership_for_user is not None: + return existing_membership_for_user + + # TODO: Handle `world_readable` rooms + return None + + # If the room is `world_readable`, it doesn't matter whether they can join, + # everyone can see the room. + # not_in_room_membership_for_user = _RoomMembershipForUser( + # room_id=room_id, + # event_id=None, + # event_pos=None, + # membership=None, + # sender=None, + # newly_joined=False, + # newly_left=False, + # is_dm=False, + # ) + # room_state = await self.get_current_state_at( + # room_id=room_id, + # room_membership_for_user_at_to_token=not_in_room_membership_for_user, + # state_filter=StateFilter.from_types( + # [(EventTypes.RoomHistoryVisibility, "")] + # ), + # to_token=to_token, + # ) + + # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) + # if ( + # visibility_event is not None + # and visibility_event.content.get("history_visibility") + # == HistoryVisibility.WORLD_READABLE + # ): + # return not_in_room_membership_for_user + + # return None async def filter_rooms( self, @@ -1081,7 +1255,6 @@ class SlidingSyncHandler: in the room at the time of `to_token`. to_token: The point in the stream to sync up to. """ - room_state_ids: StateMap[str] # People shouldn't see past their leave/ban event if room_membership_for_user_at_to_token.membership in ( @@ -1349,10 +1522,10 @@ class SlidingSyncHandler: stripped_state.append(strip_event(invite_or_knock_event)) # TODO: Handle state resets. For example, if we see - # `room_membership_for_user_at_to_token.membership = Membership.LEAVE` but - # `required_state` doesn't include it, we should indicate to the client that a - # state reset happened. Perhaps we should indicate this by setting `initial: - # True` and empty `required_state`. + # `room_membership_for_user_at_to_token.event_id=None and + # room_membership_for_user_at_to_token.membership is not None`, we should + # indicate to the client that a state reset happened. Perhaps we should indicate + # this by setting `initial: True` and empty `required_state`. 
# TODO: Since we can't determine whether we've already sent a room down this # Sliding Sync connection before (we plan to add this optimization in the diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index eb4b0a05c7..a7aa9bb8af 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -19,7 +19,7 @@ # import logging from copy import deepcopy -from typing import Optional +from typing import Dict, Optional from unittest.mock import patch from parameterized import parameterized @@ -37,12 +37,16 @@ from synapse.api.constants import ( from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues +from synapse.handlers.sliding_sync import ( + RoomSyncConfig, + StateValues, + _RoomMembershipForUser, +) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, StreamToken, UserID from synapse.types.handlers import SlidingSyncConfig from synapse.util import Clock @@ -581,9 +585,9 @@ class RoomSyncConfigTestCase(TestCase): self._assert_room_config_equal(room_sync_config_b, expected, "A into B") -class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): +class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): """ - Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it returns + Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns the correct list of rooms IDs. 
""" @@ -616,7 +620,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=now_token, to_token=now_token, @@ -643,7 +647,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -657,9 +661,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id].event_id, join_response["event_id"], ) + self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should be considered `newly_joined` because we joined during the token # range self.assertEqual(room_id_results[room_id].newly_joined, True) + self.assertEqual(room_id_results[room_id].newly_left, False) def test_get_already_joined_room(self) -> None: """ @@ -676,7 +682,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room_token, to_token=after_room_token, @@ -690,8 +696,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id].event_id, join_response["event_id"], ) + self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id].newly_joined, False) + self.assertEqual(room_id_results[room_id].newly_left, False) def test_get_invited_banned_knocked_room(self) -> None: """ @@ -748,7 +756,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -770,19 +778,25 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[invited_room_id].event_id, invite_response["event_id"], ) + self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) + self.assertEqual(room_id_results[invited_room_id].newly_joined, False) + self.assertEqual(room_id_results[invited_room_id].newly_left, False) + self.assertEqual( room_id_results[ban_room_id].event_id, ban_response["event_id"], ) + self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) + self.assertEqual(room_id_results[ban_room_id].newly_joined, False) + self.assertEqual(room_id_results[ban_room_id].newly_left, False) + self.assertEqual( room_id_results[knock_room_id].event_id, knock_room_membership_state_event.event_id, ) - # We should *NOT* be `newly_joined` because we were not joined at the the time - # of the `to_token`. 
- self.assertEqual(room_id_results[invited_room_id].newly_joined, False) - self.assertEqual(room_id_results[ban_room_id].newly_joined, False) + self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) self.assertEqual(room_id_results[knock_room_id].newly_joined, False) + self.assertEqual(room_id_results[knock_room_id].newly_left, False) def test_get_kicked_room(self) -> None: """ @@ -814,7 +828,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_kick_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -828,9 +842,12 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[kick_room_id].event_id, kick_response["event_id"], ) + self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) + self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were not joined at the the time # of the `to_token`. self.assertEqual(room_id_results[kick_room_id].newly_joined, False) + self.assertEqual(room_id_results[kick_room_id].newly_left, False) def test_forgotten_rooms(self) -> None: """ @@ -904,7 +921,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.assertEqual(channel.code, 200, channel.result) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_forgets, to_token=before_room_forgets, @@ -914,52 +931,58 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # We shouldn't see the room because it was forgotten self.assertEqual(room_id_results.keys(), set()) - def test_only_newly_left_rooms_show_up(self) -> None: + def test_newly_left_rooms(self) -> None: """ - Test that newly_left rooms still show up in the sync response but rooms that - were left before the `from_token` don't show up. See condition "2)" comments in - the `get_sync_room_ids_for_user` method. 
+ Test that newly_left are marked properly """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") # Leave before we calculate the `from_token` room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() # Leave during the from_token/to_token range (newly_left) room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) + leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) after_room2_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room2_token, ) ) - # Only the newly_left room should show up - self.assertEqual(room_id_results.keys(), {room_id2}) - # It should be pointing to the latest membership event in the from/to range but - # the `event_id` is `None` because we left the room causing the server to leave - # the room because no other local users are in it (quirk of the - # `current_state_delta_stream` table that we source things from) + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + + self.assertEqual( + room_id_results[room_id1].event_id, + leave_response1["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` or `newly_left` because that happened before + # the from/to range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertEqual( room_id_results[room_id2].event_id, - None, # _leave_response2["event_id"], + leave_response2["event_id"], ) + self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we are instead `newly_left` self.assertEqual(room_id_results[room_id2].newly_joined, False) + self.assertEqual(room_id_results[room_id2].newly_left, True) def test_no_joins_after_to_token(self) -> None: """ Rooms we join after the `to_token` should *not* show up. See condition "1b)" - comments in the `get_sync_room_ids_for_user()` method. + comments in the `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -978,7 +1001,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -991,14 +1014,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id1].event_id, join_response1["event_id"], ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_during_range_and_left_room_after_to_token(self) -> None: """ Room still shows up if we left the room but were joined during the from_token/to_token. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1016,7 +1041,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1038,14 +1063,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_before_range_and_left_room_after_to_token(self) -> None: """ Room still shows up if we left the room but were joined before the `from_token` so it should show up. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1061,7 +1088,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1082,14 +1109,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_kicked_before_range_and_left_after_to_token(self) -> None: """ Room still shows up if we left the room but were kicked before the `from_token` so it should show up. See condition "1a)" comments in the - `get_sync_room_ids_for_user()` method. + `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1123,7 +1152,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -1146,14 +1175,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) + self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were kicked self.assertEqual(room_id_results[kick_room_id].newly_joined, False) + self.assertEqual(room_id_results[kick_room_id].newly_left, False) def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None: """ Newly left room should show up. But we're also testing that joining and leaving after the `to_token` doesn't mess with the results. See condition "2)" and "1a)" - comments in the `get_sync_room_ids_for_user()` method. + comments in the `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1176,7 +1208,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1199,14 +1231,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) - # We should *NOT* be `newly_joined` because we left during the token range + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we are actually `newly_left` during + # the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, True) def test_newly_left_during_range_and_join_after_to_token(self) -> None: """ Newly left room should show up. But we're also testing that joining after the `to_token` doesn't mess with the results. See condition "2)" and "1b)" comments - in the `get_sync_room_ids_for_user()` method. + in the `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1228,7 +1263,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1250,16 +1285,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) - # We should *NOT* be `newly_joined` because we left during the token range + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we are actually `newly_left` during + # the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, True) def test_no_from_token(self) -> None: """ - Test that if we don't provide a `from_token`, we get all the rooms that we we're - joined up to the `to_token`. + Test that if we don't provide a `from_token`, we get all the rooms that we had + membership in up to the `to_token`. - Providing `from_token` only really has the effect that it adds `newly_left` - rooms to the response. + Providing `from_token` only really has the effect that it marks rooms as + `newly_left` in the response. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1276,7 +1314,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # Join and leave the room2 before the `to_token` self.helper.join(room_id2, user1_id, tok=user1_tok) - self.helper.leave(room_id2, user1_id, tok=user1_tok) + leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1284,7 +1322,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=None, to_token=after_room1_token, @@ -1292,15 +1330,31 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) # Only rooms we were joined to before the `to_token` should show up - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + + # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( room_id_results[room_id1].event_id, join_response1["event_id"], ) - # We should *NOT* be `newly_joined` because there is no `from_token` to - # define a "live" range to compare against + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + # We should *NOT* be `newly_joined`/`newly_left` because there is no + # `from_token` to define a "live" range to compare against self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) + + # Room2 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id2].event_id, + leave_response2["event_id"], + ) + self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because there is no + # `from_token` to define a "live" range to compare 
against + self.assertEqual(room_id_results[room_id2].newly_joined, False) + self.assertEqual(room_id_results[room_id2].newly_left, False) def test_from_token_ahead_of_to_token(self) -> None: """ @@ -1319,28 +1373,28 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) - # Join room1 before `before_room_token` - join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) + # Join room1 before `to_token` + join_room1_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) - # Join and leave the room2 before `before_room_token` - self.helper.join(room_id2, user1_id, tok=user1_tok) - self.helper.leave(room_id2, user1_id, tok=user1_tok) + # Join and leave the room2 before `to_token` + _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok) + leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) # Note: These are purposely swapped. The `from_token` should come after # the `to_token` in this test to_token = self.event_sources.get_current_token() - # Join room2 after `before_room_token` - self.helper.join(room_id2, user1_id, tok=user1_tok) + # Join room2 after `to_token` + _join_room2_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) # -------- - # Join room3 after `before_room_token` - self.helper.join(room_id3, user1_id, tok=user1_tok) + # Join room3 after `to_token` + _join_room3_response1 = self.helper.join(room_id3, user1_id, tok=user1_tok) - # Join and leave the room4 after `before_room_token` - self.helper.join(room_id4, user1_id, tok=user1_tok) - self.helper.leave(room_id4, user1_id, tok=user1_tok) + # Join and leave the room4 after `to_token` + _join_room4_response1 = self.helper.join(room_id4, user1_id, tok=user1_tok) + _leave_room4_response1 = self.helper.leave(room_id4, user1_id, tok=user1_tok) # Note: These are purposely swapped. The `from_token` should come after the # `to_token` in this test @@ -1350,31 +1404,59 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id4, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=from_token, to_token=to_token, ) ) - # Only rooms we were joined to before the `to_token` should show up - # - # There won't be any newly_left rooms because the `from_token` is ahead of the - # `to_token` and that range will give no membership changes to check. - self.assertEqual(room_id_results.keys(), {room_id1}) + # In the "current" state snapshot, we're joined to all of the rooms but in the + # from/to token range... 
+ self.assertIncludes( + room_id_results.keys(), + { + # Included because we were joined before both tokens + room_id1, + # Included because we had membership before the to_token + room_id2, + # Excluded because we joined after the `to_token` + # room_id3, + # Excluded because we joined after the `to_token` + # room_id4, + }, + exact=True, + ) + + # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( room_id_results[room_id1].event_id, - join_response1["event_id"], + join_room1_response1["event_id"], ) - # We should *NOT* be `newly_joined` because we joined `room1` before either of the tokens + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1` + # before either of the tokens self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) + + # Room2 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id2].event_id, + leave_room2_response1["event_id"], + ) + self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we joined and left + # `room1` before either of the tokens + self.assertEqual(room_id_results[room_id2].newly_joined, False) + self.assertEqual(room_id_results[room_id2].newly_left, False) def test_leave_before_range_and_join_leave_after_to_token(self) -> None: """ - Old left room shouldn't show up. But we're also testing that joining and leaving - after the `to_token` doesn't mess with the results. See condition "1a)" comments - in the `get_sync_room_ids_for_user()` method. + Test old left rooms. But we're also testing that joining and leaving after the + `to_token` doesn't mess with the results. See condition "1a)" comments in the + `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1386,7 +1468,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1395,21 +1477,30 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, ) ) - # Room shouldn't show up because it was left before the `from_token` - self.assertEqual(room_id_results.keys(), set()) + self.assertEqual(room_id_results.keys(), {room_id1}) + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we joined and left + # `room1` before either of the tokens + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_leave_before_range_and_join_after_to_token(self) -> None: """ - Old left room shouldn't show up. But we're also testing that joining after the - `to_token` doesn't mess with the results. See condition "1b)" comments in the - `get_sync_room_ids_for_user()` method. + Test old left room. But we're also testing that joining after the `to_token` + doesn't mess with the results. See condition "1b)" comments in the + `get_room_membership_for_user_at_to_token()` method. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1421,7 +1512,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1429,24 +1520,32 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, ) ) - # Room shouldn't show up because it was left before the `from_token` - self.assertEqual(room_id_results.keys(), set()) + self.assertEqual(room_id_results.keys(), {room_id1}) + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we joined and left + # `room1` before either of the tokens + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_leave_multiple_times_during_range_and_after_to_token( self, ) -> None: """ Join and leave multiple times shouldn't affect rooms from showing up. It just - matters that we were joined or newly_left in the from/to range. But we're also - testing that joining and leaving after the `to_token` doesn't mess with the - results. + matters that we had membership in the from/to range. But we're also testing that + joining and leaving after the `to_token` doesn't mess with the results. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1458,7 +1557,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # We create the room with user2 so the room isn't left with no members when we # leave and can still re-join. 
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
- # Join, leave, join back to the room before the from/to range
+ # Join, leave, join back to the room during the from/to range
join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
@@ -1471,7 +1570,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)

room_id_results = self.get_success(
- self.sliding_sync_handler.get_sync_room_ids_for_user(
+ self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_room1_token,
to_token=after_room1_token,
@@ -1496,15 +1595,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
}
),
)
+ self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
# We should be `newly_joined` because we joined during the token range
self.assertEqual(room_id_results[room_id1].newly_joined, True)
+ # We should *NOT* be `newly_left` because we joined during the token range and
+ # were still joined at the end of the range
+ self.assertEqual(room_id_results[room_id1].newly_left, False)

def test_join_leave_multiple_times_before_range_and_after_to_token(
self,
) -> None:
"""
Join and leave multiple times before the from/to range shouldn't affect rooms
- from showing up. It just matters that we were joined or newly_left in the
+ from showing up. It just matters that we had membership in the
from/to range. But we're also testing that joining and leaving after the
`to_token` doesn't mess with the results.
"""
@@ -1529,7 +1632,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)

room_id_results = self.get_success(
- self.sliding_sync_handler.get_sync_room_ids_for_user(
+ self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=after_room1_token,
to_token=after_room1_token,
@@ -1554,8 +1657,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
}
),
)
+ self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
# We should *NOT* be `newly_joined` because we joined before the token range
self.assertEqual(room_id_results[room_id1].newly_joined, False)
+ self.assertEqual(room_id_results[room_id1].newly_left, False)

def test_invite_before_range_and_join_leave_after_to_token(
self,
) -> None:
"""
Make it look like we joined after the token range but we were invited before the
from/to range so the room should still show up. See condition "1a)" comments in
- the `get_sync_room_ids_for_user()` method.
+ the `get_room_membership_for_user_at_to_token()` method.
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1586,7 +1691,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1608,9 +1713,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE) # We should *NOT* be `newly_joined` because we were only invited before the # token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_join_and_display_name_changes_in_token_range( self, @@ -1658,7 +1765,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1684,8 +1791,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_in_token_range( self, @@ -1721,7 +1830,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_change1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_change1_token, @@ -1744,8 +1853,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_before_and_after_token_range( self, @@ -1791,7 +1902,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1817,8 +1928,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_leave_after_token_range( self, @@ -1828,7 +1941,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): if there are multiple `join` membership events in a row indicating `displayname`/`avatar_url` updates and we leave after the `to_token`. - See condition "1a)" comments in the `get_sync_room_ids_for_user()` method. 
+ See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1871,7 +1984,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1897,8 +2010,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_display_name_changes_join_after_token_range( self, @@ -1908,7 +2023,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): indicating `displayname`/`avatar_url` updates doesn't affect the results (we joined after the token range so it shouldn't show up) - See condition "1b)" comments in the `get_sync_room_ids_for_user()` method. + See condition "1b)" comments in the `get_room_membership_for_user_at_to_token()` method. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -1937,7 +2052,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1973,7 +2088,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_more_changes_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_more_changes_token, @@ -1987,9 +2102,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): room_id_results[room_id1].event_id, join_response2["event_id"], ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be considered `newly_joined` because there is some non-join event in # between our latest join event. 
self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_newly_joined_only_joins_during_token_range( self, @@ -2036,7 +2153,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): after_room1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -2062,8 +2179,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): } ), ) + self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we first joined during the token range self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertEqual(room_id_results[room_id1].newly_left, False) def test_multiple_rooms_are_not_confused( self, @@ -2086,16 +2205,18 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): # Invited and left the room before the token self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) + leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) # Invited to room2 - self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + invite_room2_response = self.helper.invite( + room_id2, src=user2_id, targ=user1_id, tok=user2_tok + ) before_room3_token = self.event_sources.get_current_token() # Invited and left room3 during the from/to range room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) self.helper.invite(room_id3, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.leave(room_id3, user1_id, tok=user1_tok) + leave_room3_response = self.helper.leave(room_id3, user1_id, tok=user1_tok) after_room3_token = self.event_sources.get_current_token() @@ -2108,7 +2229,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.helper.leave(room_id3, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room3_token, to_token=after_room3_token, @@ -2118,19 +2239,158 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase): self.assertEqual( room_id_results.keys(), { - # `room_id1` shouldn't show up because we left before the from/to range - # - # Room should show up because we were invited before the from/to range + # Left before the from/to range + room_id1, + # Invited before the from/to range room_id2, - # Room should show up because it was newly_left during the from/to range + # `newly_left` during the from/to range room_id3, }, ) + # Room1 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + room_id_results[room_id1].event_id, + leave_room1_response["event_id"], + ) + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left + # before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertEqual(room_id_results[room_id1].newly_left, False) -class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): + # Room2 + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + 
room_id_results[room_id2].event_id,
+ invite_room2_response["event_id"],
+ )
+ self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE)
+ # We should *NOT* be `newly_joined`/`newly_left` because we were invited before
+ # the token range
+ self.assertEqual(room_id_results[room_id2].newly_joined, False)
+ self.assertEqual(room_id_results[room_id2].newly_left, False)
+
+ # Room3
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[room_id3].event_id,
+ leave_room3_response["event_id"],
+ )
+ self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE)
+ # We should be `newly_left` because we were invited and left during
+ # the token range
+ self.assertEqual(room_id_results[room_id3].newly_joined, False)
+ self.assertEqual(room_id_results[room_id3].newly_left, True)
+
+ def test_state_reset(self) -> None:
+ """
+ Test a state reset scenario where the user gets removed from the room (when
+ there is no corresponding leave event)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # The room where the state reset will happen
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join another room so we don't hit the short-circuit and return early if they
+ # have no room membership
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+ before_reset_token = self.event_sources.get_current_token()
+
+ # Send another state event to make a position for the state reset to happen at
+ dummy_state_response = self.helper.send_state(
+ room_id1,
+ event_type="foobarbaz",
+ state_key="",
+ body={"foo": "bar"},
+ tok=user2_tok,
+ )
+ dummy_state_pos = self.get_success(
+ self.store.get_position_for_event(dummy_state_response["event_id"])
+ )
+
+ # Mock a state reset removing the membership for user1 in the current state
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="current_state_events",
+ keyvalues={
+ "room_id": room_id1,
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ },
+ desc="state reset user in current_state_events",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="local_current_membership",
+ keyvalues={
+ "room_id": room_id1,
+ "user_id": user1_id,
+ },
+ desc="state reset user in local_current_membership",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ table="current_state_delta_stream",
+ values={
+ "stream_id": dummy_state_pos.stream,
+ "room_id": room_id1,
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ "event_id": None,
+ "prev_event_id": join_response1["event_id"],
+ "instance_name": dummy_state_pos.instance_name,
+ },
+ desc="state reset user in current_state_delta_stream",
+ )
+ )
+
+ # Manually bust the cache since we're just manually messing with the database
+ # and not causing an actual state reset.
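+ # (`entity_has_changed` records that user1's membership changed at this stream
+ # position; without it, the membership `StreamChangeCache` could keep reporting
+ # "no change" for user1 and the synthetic delta above would never be seen.)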
+ self.store._membership_stream_cache.entity_has_changed( + user1_id, dummy_state_pos.stream + ) + + after_reset_token = self.event_sources.get_current_token() + + # The function under test + room_id_results = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + UserID.from_string(user1_id), + from_token=before_reset_token, + to_token=after_reset_token, + ) + ) + + # Room1 should show up because it was `newly_left` via state reset during the from/to range + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + # It should be pointing to no event because we were removed from the room + # without a corresponding leave event + self.assertEqual( + room_id_results[room_id1].event_id, + None, + ) + # State reset caused us to leave the room and there is no corresponding leave event + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we joined before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + # We should be `newly_left` because we were removed via state reset during the from/to range + self.assertEqual(room_id_results[room_id1].newly_left, True) + + +class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase): """ - Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it works with + Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with sharded event stream_writers enabled """ @@ -2189,7 +2449,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): We then send some events to advance the stream positions of worker1 and worker3 but worker2 is lagging behind because it's stuck. We are specifically testing - that `get_sync_room_ids_for_user(from_token=xxx, to_token=xxx)` should work + that `get_room_membership_for_user_at_to_token(from_token=xxx, to_token=xxx)` should work correctly in these adverse conditions. """ user1_id = self.register_user("user1", "pass") @@ -2228,7 +2488,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) # Leave room2 - self.helper.leave(room_id2, user1_id, tok=user1_tok) + leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok) # Leave room3 self.helper.leave(room_id3, user1_id, tok=user1_tok) @@ -2265,7 +2525,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase): # For room_id1/worker1: leave and join the room to advance the stream position # and generate membership changes. self.helper.leave(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) + join_room1_response = self.helper.join(room_id1, user1_id, tok=user1_tok) # For room_id2/worker2: which is currently stuck, join the room. 
join_on_worker2_response = self.helper.join(room_id2, user1_id, tok=user1_tok)
# For room_id3/worker3: leave and join the room to advance the stream position
@@ -2319,7 +2579,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
# The function under test
room_id_results = self.get_success(
- self.sliding_sync_handler.get_sync_room_ids_for_user(
+ self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
UserID.from_string(user1_id),
from_token=before_stuck_activity_token,
to_token=stuck_activity_token,
@@ -2330,18 +2590,411 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
room_id_results.keys(),
{
room_id1,
- # room_id2 shouldn't show up because we left before the from/to range
- # and the join event during the range happened while worker2 was stuck.
- # This means that from the perspective of the master, where the
- # `stuck_activity_token` is generated, the stream position for worker2
- # wasn't advanced to the join yet. Looking at the `instance_map`, the
- # join technically comes after `stuck_activity_token``.
- #
- # room_id2,
+ room_id2,
room_id3,
},
)

+ # Room1
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[room_id1].event_id,
+ join_room1_response["event_id"],
+ )
+ self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+ # We should be `newly_joined` because we joined during the token range
+ self.assertEqual(room_id_results[room_id1].newly_joined, True)
+ self.assertEqual(room_id_results[room_id1].newly_left, False)
+
+ # Room2
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[room_id2].event_id,
+ leave_room2_response["event_id"],
+ )
+ self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+ # room_id2 should *NOT* be considered `newly_left` because we left before the
+ # from/to range and the join event during the range happened while worker2 was
+ # stuck. This means that from the perspective of the master, where the
+ # `stuck_activity_token` is generated, the stream position for worker2 wasn't
+ # advanced to the join yet. Looking at the `instance_map`, the join technically
+ # comes after `stuck_activity_token`.
+ self.assertEqual(room_id_results[room_id2].newly_joined, False)
+ self.assertEqual(room_id_results[room_id2].newly_left, False)
+
+ # Room3
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[room_id3].event_id,
+ join_on_worker3_response["event_id"],
+ )
+ self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN)
+ # We should be `newly_joined` because we joined during the token range
+ self.assertEqual(room_id_results[room_id3].newly_joined, True)
+ self.assertEqual(room_id_results[room_id3].newly_left, False)
+
+
+class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ """
+ Tests Sliding Sync handler `filter_rooms_relevant_for_sync()` to make sure it returns
+ the correct list of room IDs.
+ """ + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sliding_sync_handler = self.hs.get_sliding_sync_handler() + self.store = self.hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.storage_controllers = hs.get_storage_controllers() + + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + + def test_no_rooms(self) -> None: + """ + Test when the user has never joined any rooms before + """ + user1_id = self.register_user("user1", "pass") + # user1_tok = self.login(user1_id, "pass") + + now_token = self.event_sources.get_current_token() + + room_id_results = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=now_token, + to_token=now_token, + ) + + self.assertEqual(room_id_results.keys(), set()) + + def test_basic_rooms(self) -> None: + """ + Test that rooms that the user is joined to, invited to, banned from, and knocked + on show up. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + before_room_token = self.event_sources.get_current_token() + + join_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + join_response = self.helper.join(join_room_id, user1_id, tok=user1_tok) + + # Setup the invited room (user2 invites user1 to the room) + invited_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + invite_response = self.helper.invite( + invited_room_id, targ=user1_id, tok=user2_tok + ) + + # Setup the ban room (user2 bans user1 from the room) + ban_room_id = self.helper.create_room_as( + user2_id, tok=user2_tok, is_public=True + ) + self.helper.join(ban_room_id, user1_id, tok=user1_tok) + ban_response = self.helper.ban( + ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + + # Setup the knock room (user1 knocks on the room) + knock_room_id = self.helper.create_room_as( + user2_id, tok=user2_tok, room_version=RoomVersions.V7.identifier + ) + self.helper.send_state( + knock_room_id, + EventTypes.JoinRules, + {"join_rule": JoinRules.KNOCK}, + tok=user2_tok, + ) + # User1 knocks on the room + knock_channel = self.make_request( + "POST", + "/_matrix/client/r0/knock/%s" % (knock_room_id,), + b"{}", + user1_tok, + ) + self.assertEqual(knock_channel.code, 200, knock_channel.result) + knock_room_membership_state_event = self.get_success( + self.storage_controllers.state.get_current_state_event( + knock_room_id, EventTypes.Member, user1_id + ) + ) + assert knock_room_membership_state_event is not None + + after_room_token = self.event_sources.get_current_token() + + room_id_results = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=before_room_token, + to_token=after_room_token, + ) + + # Ensure that the invited, ban, and knock rooms show up + self.assertEqual( + room_id_results.keys(), + { + join_room_id, + invited_room_id, + ban_room_id, + knock_room_id, + }, + ) + # It should be pointing to the the respective membership event (latest + # membership event in the from/to range) + self.assertEqual( + room_id_results[join_room_id].event_id, + join_response["event_id"], + ) + self.assertEqual(room_id_results[join_room_id].membership, Membership.JOIN) + self.assertEqual(room_id_results[join_room_id].newly_joined, True) + self.assertEqual(room_id_results[join_room_id].newly_left, False) + + self.assertEqual( + room_id_results[invited_room_id].event_id, + invite_response["event_id"], + ) + self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) + self.assertEqual(room_id_results[invited_room_id].newly_joined, False) + self.assertEqual(room_id_results[invited_room_id].newly_left, False) + + self.assertEqual( + room_id_results[ban_room_id].event_id, + ban_response["event_id"], + ) + self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) + self.assertEqual(room_id_results[ban_room_id].newly_joined, False) + self.assertEqual(room_id_results[ban_room_id].newly_left, False) + + self.assertEqual( + room_id_results[knock_room_id].event_id, + knock_room_membership_state_event.event_id, + ) + self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) + self.assertEqual(room_id_results[knock_room_id].newly_joined, False) + self.assertEqual(room_id_results[knock_room_id].newly_left, False) + + def test_only_newly_left_rooms_show_up(self) -> None: + """ + Test that `newly_left` rooms 
still show up in the sync response but rooms that
+ were left before the `from_token` don't show up. See condition "2)" comments in
+ the `get_room_membership_for_user_at_to_token()` method.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Leave before we calculate the `from_token`
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Leave during the from_token/to_token range (newly_left)
+ room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+ _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+ after_room2_token = self.event_sources.get_current_token()
+
+ room_id_results = self._get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_room1_token,
+ to_token=after_room2_token,
+ )
+
+ # Only the `newly_left` room should show up
+ self.assertEqual(room_id_results.keys(), {room_id2})
+ self.assertEqual(
+ room_id_results[room_id2].event_id,
+ _leave_response2["event_id"],
+ )
+ # We should *NOT* be `newly_joined` because we are instead `newly_left`
+ self.assertEqual(room_id_results[room_id2].newly_joined, False)
+ self.assertEqual(room_id_results[room_id2].newly_left, True)
+
+ def test_get_kicked_room(self) -> None:
+ """
+ Test that a room that the user was kicked from still shows up. When the user
+ comes back to their client, they should see that they were kicked.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Setup the kick room (user2 kicks user1 from the room)
+ kick_room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, is_public=True
+ )
+ self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ # Kick user1 from the room
+ kick_response = self.helper.change_membership(
+ room=kick_room_id,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+
+ after_kick_token = self.event_sources.get_current_token()
+
+ room_id_results = self._get_sync_room_ids_for_user(
+ UserID.from_string(user1_id),
+ from_token=after_kick_token,
+ to_token=after_kick_token,
+ )
+
+ # The kicked room should show up
+ self.assertEqual(room_id_results.keys(), {kick_room_id})
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[kick_room_id].event_id,
+ kick_response["event_id"],
+ )
+ self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+ self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
+ # We should *NOT* be `newly_joined` because we were not joined at the time
+ # of the `to_token`.
+ self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
+ self.assertEqual(room_id_results[kick_room_id].newly_left, False)
+
+ def test_state_reset(self) -> None:
+ """
+ Test a state reset scenario where the user gets removed from the room (when
+ there is no corresponding leave event)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # The room where the state reset will happen
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join another room so we don't hit the short-circuit and return early if they
+ # have no room membership
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+ before_reset_token = self.event_sources.get_current_token()
+
+ # Send another state event to make a position for the state reset to happen at
+ dummy_state_response = self.helper.send_state(
+ room_id1,
+ event_type="foobarbaz",
+ state_key="",
+ body={"foo": "bar"},
+ tok=user2_tok,
+ )
+ dummy_state_pos = self.get_success(
+ self.store.get_position_for_event(dummy_state_response["event_id"])
+ )
+
+ # Mock a state reset removing the membership for user1 in the current state
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="current_state_events",
+ keyvalues={
+ "room_id": room_id1,
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ },
+ desc="state reset user in current_state_events",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="local_current_membership",
+ keyvalues={
+ "room_id": room_id1,
+ "user_id": user1_id,
+ },
+ desc="state reset user in local_current_membership",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ table="current_state_delta_stream",
+ values={
+ "stream_id": dummy_state_pos.stream,
+ "room_id": room_id1,
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ "event_id": None,
+ "prev_event_id": join_response1["event_id"],
+ "instance_name": dummy_state_pos.instance_name,
+ },
+ desc="state reset user in current_state_delta_stream",
+ )
+ )
+
+ # Manually bust the cache since we're just manually messing with the database
+ # and not causing an actual state reset.
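+ # (As in the state reset test above: mark user1 as changed in the membership
+ # stream-change cache so the synthetic delta is actually picked up.)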
+ self.store._membership_stream_cache.entity_has_changed( + user1_id, dummy_state_pos.stream + ) + + after_reset_token = self.event_sources.get_current_token() + + # The function under test + room_id_results = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=before_reset_token, + to_token=after_reset_token, + ) + + # Room1 should show up because it was `newly_left` via state reset during the from/to range + self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + # It should be pointing to no event because we were removed from the room + # without a corresponding leave event + self.assertEqual( + room_id_results[room_id1].event_id, + None, + ) + # State reset caused us to leave the room and there is no corresponding leave event + self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + # We should *NOT* be `newly_joined` because we joined before the token range + self.assertEqual(room_id_results[room_id1].newly_joined, False) + # We should be `newly_left` because we were removed via state reset during the from/to range + self.assertEqual(room_id_results[room_id1].newly_left, True) + class FilterRoomsTestCase(HomeserverTestCase): """ @@ -2367,6 +3020,31 @@ class FilterRoomsTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + def _create_dm_room( self, inviter_user_id: str, @@ -2438,12 +3116,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try with `is_dm=True` @@ -2496,12 +3172,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try with `is_encrypted=True` @@ -2552,12 +3226,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + 
to_token=after_rooms_token, ) # Try with `is_invite=True` @@ -2621,12 +3293,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try finding only normal rooms @@ -2714,12 +3384,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Try finding *NOT* normal rooms @@ -2838,12 +3506,10 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) filtered_room_map = self.get_success( @@ -2884,6 +3550,31 @@ class SortRoomsTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() + def _get_sync_room_ids_for_user( + self, + user: UserID, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Get the rooms the user should be syncing with + """ + room_membership_for_user_map = self.get_success( + self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + user=user, + from_token=from_token, + to_token=to_token, + ) + ) + filtered_sync_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms_relevant_for_sync( + user=user, + room_membership_for_user_map=room_membership_for_user_map, + ) + ) + + return filtered_sync_room_map + def test_sort_activity_basic(self) -> None: """ Rooms with newer activity are sorted first. 
@@ -2903,12 +3594,10 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) @@ -2986,12 +3675,10 @@ class SortRoomsTestCase(HomeserverTestCase): self.helper.send(room_id3, "activity in room3", tok=user2_tok) # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=before_rooms_token, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=before_rooms_token, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) @@ -3052,12 +3739,10 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self.get_success( - self.sliding_sync_handler.get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, ) # Sort the rooms (what we're testing) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 4236812db5..f5d57e689c 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -20,7 +20,8 @@ # import json import logging -from typing import AbstractSet, Any, Dict, Iterable, List, Optional +from http import HTTPStatus +from typing import Any, Dict, Iterable, List from parameterized import parameterized, parameterized_class @@ -1259,7 +1260,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): exact: bool = False, ) -> None: """ - Wrapper around `_assertIncludes` to give slightly better looking diff error + Wrapper around `assertIncludes` to give slightly better looking diff error messages that include some context "$event_id (type, state_key)". Args: @@ -1275,7 +1276,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): for event in actual_required_state: assert isinstance(event, dict) - self._assertIncludes( + self.assertIncludes( { f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")' for event in actual_required_state @@ -1289,56 +1290,6 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): message=str(actual_required_state), ) - def _assertIncludes( - self, - actual_items: AbstractSet[str], - expected_items: AbstractSet[str], - exact: bool = False, - message: Optional[str] = None, - ) -> None: - """ - Assert that all of the `expected_items` are included in the `actual_items`. - - This assert could also be called `assertContains`, `assertItemsInSet` - - Args: - actual_items: The container - expected_items: The items to check for in the container - exact: Whether the actual state should be exactly equal to the expected - state (no extras). - message: Optional message to include in the failure message. 
- """ - # Check that each set has the same items - if exact and actual_items == expected_items: - return - # Check for a superset - elif not exact and actual_items >= expected_items: - return - - expected_lines: List[str] = [] - for expected_item in expected_items: - is_expected_in_actual = expected_item in actual_items - expected_lines.append( - "{} {}".format(" " if is_expected_in_actual else "?", expected_item) - ) - - actual_lines: List[str] = [] - for actual_item in actual_items: - is_actual_in_expected = actual_item in expected_items - actual_lines.append( - "{} {}".format("+" if is_actual_in_expected else " ", actual_item) - ) - - newline = "\n" - expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}" - actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}" - first_message = ( - "Items must match exactly" if exact else "Some expected items are missing." - ) - diff_message = f"{first_message}\n{expected_string}\n{actual_string}" - - self.fail(f"{diff_message}\n{message}") - def _add_new_dm_to_global_account_data( self, source_user_id: str, target_user_id: str, target_room_id: str ) -> None: @@ -3868,6 +3819,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): body={"foo": "bar"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) # Make the Sliding Sync request with wildcards for the `state_key` channel = self.make_request( @@ -3891,16 +3849,13 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ], "timeline_limit": 0, }, - } - # TODO: Room subscription should also combine with the `required_state` - # "room_subscriptions": { - # room_id1: { - # "required_state": [ - # ["org.matrix.bar_state", ""] - # ], - # "timeline_limit": 0, - # } - # } + }, + "room_subscriptions": { + room_id1: { + "required_state": [["org.matrix.bar_state", ""]], + "timeline_limit": 0, + } + }, }, access_token=user1_tok, ) @@ -3917,6 +3872,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): state_map[(EventTypes.Member, user1_id)], state_map[(EventTypes.Member, user2_id)], state_map[("org.matrix.foo_state", "")], + state_map[("org.matrix.bar_state", "")], }, exact=True, ) @@ -4009,6 +3965,271 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel.json_body["lists"]["foo-list"], ) + def test_room_subscriptions_with_join_membership(self) -> None: + """ + Test `room_subscriptions` with a joined room should give us timeline and current + state events. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We should see some state + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + # We should see some events + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + ], + channel.json_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + channel.json_body["rooms"][room_id1]["num_live"], + 0, + channel.json_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + channel.json_body["rooms"][room_id1]["limited"], + True, + channel.json_body["rooms"][room_id1], + ) + + def test_room_subscriptions_with_leave_membership(self) -> None: + """ + Test `room_subscriptions` with a leave room should give us timeline and state + events up to the leave event. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Send some events after user1 leaves + self.helper.send(room_id1, "activity after leave", tok=user2_tok) + # Update state after user1 leaves + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "qux"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 2, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the state at the time of the leave + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[("org.matrix.foo_state", "")], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + # We should see some before we left (nothing after) + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + leave_response["event_id"], + ], + channel.json_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + channel.json_body["rooms"][room_id1]["num_live"], + 0, + channel.json_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + channel.json_body["rooms"][room_id1]["limited"], + True, + channel.json_body["rooms"][room_id1], + ) + + def test_room_subscriptions_no_leak_private_room(self) -> None: + """ + Test `room_subscriptions` with a private room we have never been in should not + leak any data to the user. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False) + + # We should not be able to join the private room + self.helper.join( + room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN + ) + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should not see the room at all (we're not in it) + self.assertIsNone( + channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] + ) + + def test_room_subscriptions_world_readable(self) -> None: + """ + Test `room_subscriptions` with a room that has `world_readable` history visibility + + FIXME: We should be able to see the room timeline and state + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room with `world_readable` history visibility + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "preset": "public_chat", + "initial_state": [ + { + "content": { + "history_visibility": HistoryVisibility.WORLD_READABLE + }, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + # Ensure we're testing with a room with `world_readable` history visibility + # which means events are visible to anyone even without membership. + history_visibility_response = self.helper.get_state( + room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok + ) + self.assertEqual( + history_visibility_response.get("history_visibility"), + HistoryVisibility.WORLD_READABLE, + ) + + # Note: We never join the room + + # Make the Sliding Sync request with just the room subscription + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # FIXME: In the future, we should be able to see the room because it's + # `world_readable` but currently we don't support this. + self.assertIsNone( + channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] + ) + class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): """Tests for the to-device sliding sync extension""" diff --git a/tests/unittest.py b/tests/unittest.py index a7c20556a0..4aa7f56106 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -28,6 +28,7 @@ import logging import secrets import time from typing import ( + AbstractSet, Any, Awaitable, Callable, @@ -269,6 +270,56 @@ class TestCase(unittest.TestCase): required[key], actual[key], msg="%s mismatch. %s" % (key, actual) ) + def assertIncludes( + self, + actual_items: AbstractSet[str], + expected_items: AbstractSet[str], + exact: bool = False, + message: Optional[str] = None, + ) -> None: + """ + Assert that all of the `expected_items` are included in the `actual_items`. 
+
+ This assert could also be called `assertContains`, `assertItemsInSet`
+
+ Args:
+ actual_items: The container
+ expected_items: The items to check for in the container
+ exact: Whether the actual state should be exactly equal to the expected
+ state (no extras).
+ message: Optional message to include in the failure message.
+ """
+ # Check that each set has the same items
+ if exact and actual_items == expected_items:
+ return
+ # Check for a superset
+ elif not exact and actual_items >= expected_items:
+ return
+
+ expected_lines: List[str] = []
+ for expected_item in expected_items:
+ is_expected_in_actual = expected_item in actual_items
+ expected_lines.append(
+ "{} {}".format(" " if is_expected_in_actual else "?", expected_item)
+ )
+
+ actual_lines: List[str] = []
+ for actual_item in actual_items:
+ is_actual_in_expected = actual_item in expected_items
+ actual_lines.append(
+ "{} {}".format("+" if is_actual_in_expected else " ", actual_item)
+ )
+
+ newline = "\n"
+ expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
+ actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
+ first_message = (
+ "Items must match exactly" if exact else "Some expected items are missing."
+ )
+ diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
+
+ self.fail(f"{diff_message}\n{message}")
+

def DEBUG(target: TV) -> TV:
"""A decorator to set the .loglevel attribute to logging.DEBUG.

From 4f6194492a4f16c37e55d15d0434d5357266fed0 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Mon, 15 Jul 2024 11:42:59 +0200
Subject: [PATCH 033/332] Make sure we use the right logic for enabling the
 media repo. (#17424)

This removes the `enable_media_repo` attribute on the server config in
favour of always using the `can_load_media_repo` attribute in the media
config.

This should avoid issues like #17420 in the future.
---
 changelog.d/17424.misc | 1 +
 synapse/app/homeserver.py | 2 +-
 synapse/config/repository.py | 2 +-
 synapse/config/server.py | 6 ------
 4 files changed, 3 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/17424.misc

diff --git a/changelog.d/17424.misc b/changelog.d/17424.misc
new file mode 100644
index 0000000000..d4a81c137f
--- /dev/null
+++ b/changelog.d/17424.misc
@@ -0,0 +1 @@
+Make sure we always use the right logic for enabling the media repo.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 2b111847b7..e114ab7ec4 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer):
)

if name in ["media", "federation", "client"]:
- if self.config.server.enable_media_repo:
+ if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 1645470499..dc0e93ffa1 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if ( - self.root.server.enable_media_repo is False + config.get("enable_media_repo", True) is False and config.get("worker_app") != "synapse.app.media_repository" ): self.can_load_media_repo = False diff --git a/synapse/config/server.py b/synapse/config/server.py index a2b2305776..8bb97df175 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -395,12 +395,6 @@ class ServerConfig(Config): self.presence_router_config, ) = load_module(presence_router_config, ("presence", "presence_router")) - # whether to enable the media repository endpoints. This should be set - # to false if the media repository is running as a separate endpoint; - # doing so ensures that we will not run cache cleanup jobs on the - # master, potentially causing inconsistency. - self.enable_media_repo = config.get("enable_media_repo", True) - # Whether to require authentication to retrieve profile data (avatars, # display names) of other users through the client API. self.require_auth_for_profile_requests = config.get( From 4c44020838ddf49dba3e22029d25ff880b260a69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:13 +0100 Subject: [PATCH 034/332] Bump twine from 5.1.0 to 5.1.1 (#17443) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index f578a33ed5..3f3a3e38d9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2704,19 +2704,19 @@ docs = ["sphinx (<7.0.0)"] [[package]] name = "twine" -version = "5.1.0" +version = "5.1.1" description = "Collection of utilities for publishing packages on PyPI" optional = false python-versions = ">=3.8" files = [ - {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"}, - {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"}, + {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"}, + {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"}, ] [package.dependencies] importlib-metadata = ">=3.6" keyring = ">=15.1" -pkginfo = ">=1.8.1" +pkginfo = ">=1.8.1,<1.11" readme-renderer = ">=35.0" requests = ">=2.20" requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" From 0de0689ae8fc4b2311aea0e007f8144dfbde370b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:30 +0100 Subject: [PATCH 035/332] Bump jsonschema from 4.22.0 to 4.23.0 (#17444) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3f3a3e38d9..0cff51faf8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -934,13 +934,13 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.22.0" +version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, - {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = 
"sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] @@ -953,7 +953,7 @@ rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-specifications" From 2af729a193389cc71e61232b67a62574e166128c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:57:45 +0100 Subject: [PATCH 036/332] Bump ulid from 1.1.2 to 1.1.3 (#17442) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a8bf7a49c..cc2c59d6f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -597,9 +597,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ulid" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259" +checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289" dependencies = [ "getrandom", "rand", From 14f2b1eb00986b7ca06739abe01fc88533885991 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:58:12 +0100 Subject: [PATCH 037/332] Bump bytes from 1.6.0 to 1.6.1 (#17441) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc2c59d6f6..e9adfcbdc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" [[package]] name = "cfg-if" From d88ba45db9f5d344542a04a06c29ea26ebb8fa21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:58:28 +0100 Subject: [PATCH 038/332] Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712 (#17446) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0cff51faf8..805089e79e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2851,13 +2851,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.22.0.20240610" +version = "4.23.0.20240712" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"}, - {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"}, + {file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"}, + {file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = 
"sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"}, ] [package.dependencies] From df11af14dbd2faad916924cab96e75bd7c95a66a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2024 16:13:04 +0100 Subject: [PATCH 039/332] Fix bug where sync could get stuck when using workers (#17438) This is because we serialized the token wrong if the instance map contained entries from before the minimum token. --- changelog.d/17438.bugfix | 1 + synapse/handlers/sliding_sync.py | 11 ++++- synapse/types/__init__.py | 65 +++++++++++++++++++++++++---- tests/test_types.py | 71 ++++++++++++++++++++++++++++++++ 4 files changed, 138 insertions(+), 10 deletions(-) create mode 100644 changelog.d/17438.bugfix diff --git a/changelog.d/17438.bugfix b/changelog.d/17438.bugfix new file mode 100644 index 0000000000..cff6eecd48 --- /dev/null +++ b/changelog.d/17438.bugfix @@ -0,0 +1 @@ +Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index be98b379eb..1b5262d667 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -699,10 +699,17 @@ class SlidingSyncHandler: instance_to_max_stream_ordering_map[instance_name] = stream_ordering # Then assemble the `RoomStreamToken` + min_stream_pos = min(instance_to_max_stream_ordering_map.values()) membership_snapshot_token = RoomStreamToken( # Minimum position in the `instance_map` - stream=min(instance_to_max_stream_ordering_map.values()), - instance_map=immutabledict(instance_to_max_stream_ordering_map), + stream=min_stream_pos, + instance_map=immutabledict( + { + instance_name: stream_pos + for instance_name, stream_pos in instance_to_max_stream_ordering_map.items() + if stream_pos > min_stream_pos + } + ), ) # Since we fetched the users room list at some point in time after the from/to diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index b22a13ef01..3962ecc996 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -20,6 +20,7 @@ # # import abc +import logging import re import string from enum import Enum @@ -74,6 +75,9 @@ if TYPE_CHECKING: from synapse.storage.databases.main import DataStore, PurgeEventsStore from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore + +logger = logging.getLogger(__name__) + # Define a state map type from type/state_key to T (usually an event ID or # event) T = TypeVar("T") @@ -454,6 +458,8 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): represented by a default `stream` attribute and a map of instance name to stream position of any writers that are ahead of the default stream position. + + The values in `instance_map` must be greater than the `stream` attribute. """ stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True) @@ -468,6 +474,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): kw_only=True, ) + def __attrs_post_init__(self) -> None: + # Enforce that all instances have a value greater than the min stream + # position. + for i, v in self.instance_map.items(): + if v <= self.stream: + raise ValueError( + f"'instance_map' includes a stream position before the main 'stream' attribute. 
Instance: {i}" + ) + @classmethod @abc.abstractmethod async def parse(cls, store: "DataStore", string: str) -> "Self": @@ -494,6 +509,9 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): for instance in set(self.instance_map).union(other.instance_map) } + # Filter out any redundant entries. + instance_map = {i: s for i, s in instance_map.items() if s > max_stream} + return attr.evolve( self, stream=max_stream, instance_map=immutabledict(instance_map) ) @@ -539,10 +557,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): def bound_stream_token(self, max_stream: int) -> "Self": """Bound the stream positions to a maximum value""" + min_pos = min(self.stream, max_stream) return type(self)( - stream=min(self.stream, max_stream), + stream=min_pos, instance_map=immutabledict( - {k: min(s, max_stream) for k, s in self.instance_map.items()} + { + k: min(s, max_stream) + for k, s in self.instance_map.items() + if min(s, max_stream) > min_pos + } ), ) @@ -637,6 +660,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): "Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'." ) + super().__attrs_post_init__() + @classmethod async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken": try: @@ -651,6 +676,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): instance_map = {} for part in parts[1:]: + if not part: + # Handle tokens of the form `m5~`, which were created by + # a bug + continue + key, value = part.split(".") instance_id = int(key) pos = int(value) @@ -666,7 +696,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): except CancelledError: raise except Exception: - pass + # We log an exception here as, even though this *might* be a client + # handing us a bad token, it's more likely that Synapse returned a bad + # token (and we really want to catch those!). + logger.exception("Failed to parse stream token: %r", string) raise SynapseError(400, "Invalid room stream token %r" % (string,)) @classmethod @@ -713,6 +746,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return self.instance_map.get(instance_name, self.stream) async def to_string(self, store: "DataStore") -> str: + """See class level docstring for information about the format.""" + if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) elif self.instance_map: @@ -727,8 +762,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): instance_id = await store.get_id_for_instance(name) entries.append(f"{instance_id}.{pos}") - encoded_map = "~".join(entries) - return f"m{self.stream}~{encoded_map}" + if entries: + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + return f"s{self.stream}" else: return "s%d" % (self.stream,) @@ -756,6 +793,11 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): instance_map = {} for part in parts[1:]: + if not part: + # Handle tokens of the form `m5~`, which were created by + # a bug + continue + key, value = part.split(".") instance_id = int(key) pos = int(value) @@ -770,10 +812,15 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): except CancelledError: raise except Exception: - pass + # We log an exception here as, even though this *might* be a client + # handing us a bad token, it's more likely that Synapse returned a bad + # token (and we really want to catch those!). 
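+ # (For reference: well-formed tokens here look like "5" or "m5~1.6"; the + # buggy "m5~" form, an instance-map marker with no entries, is handled + # above by skipping empty parts.) 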
+ logger.exception("Failed to parse stream token: %r", string) raise SynapseError(400, "Invalid stream token %r" % (string,)) async def to_string(self, store: "DataStore") -> str: + """See class level docstring for information about the format.""" + if self.instance_map: entries = [] for name, pos in self.instance_map.items(): @@ -786,8 +833,10 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): instance_id = await store.get_id_for_instance(name) entries.append(f"{instance_id}.{pos}") - encoded_map = "~".join(entries) - return f"m{self.stream}~{encoded_map}" + if entries: + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + return str(self.stream) else: return str(self.stream) diff --git a/tests/test_types.py b/tests/test_types.py index 944aa784fc..00adc65a5a 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -19,9 +19,18 @@ # # +from typing import Type +from unittest import skipUnless + +from immutabledict import immutabledict +from parameterized import parameterized_class + from synapse.api.errors import SynapseError from synapse.types import ( + AbstractMultiWriterStreamToken, + MultiWriterStreamToken, RoomAlias, + RoomStreamToken, UserID, get_domain_from_id, get_localpart_from_id, @@ -29,6 +38,7 @@ from synapse.types import ( ) from tests import unittest +from tests.utils import USE_POSTGRES_FOR_TESTS class IsMineIDTests(unittest.HomeserverTestCase): @@ -127,3 +137,64 @@ class MapUsernameTestCase(unittest.TestCase): # this should work with either a unicode or a bytes self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast") self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast") + + +@parameterized_class( + ("token_type",), + [ + (MultiWriterStreamToken,), + (RoomStreamToken,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", +) +class MultiWriterTokenTestCase(unittest.HomeserverTestCase): + """Tests for the different types of multi writer tokens.""" + + token_type: Type[AbstractMultiWriterStreamToken] + + def test_basic_token(self) -> None: + """Test that a simple stream token can be serialized and unserialized""" + store = self.hs.get_datastores().main + + token = self.token_type(stream=5) + + string_token = self.get_success(token.to_string(store)) + + if isinstance(token, RoomStreamToken): + self.assertEqual(string_token, "s5") + else: + self.assertEqual(string_token, "5") + + parsed_token = self.get_success(self.token_type.parse(store, string_token)) + self.assertEqual(parsed_token, token) + + @skipUnless(USE_POSTGRES_FOR_TESTS, "Requires Postgres") + def test_instance_map(self) -> None: + """Test for stream token with instance map""" + store = self.hs.get_datastores().main + + token = self.token_type(stream=5, instance_map=immutabledict({"foo": 6})) + + string_token = self.get_success(token.to_string(store)) + self.assertEqual(string_token, "m5~1.6") + + parsed_token = self.get_success(self.token_type.parse(store, string_token)) + self.assertEqual(parsed_token, token) + + def test_instance_map_assertion(self) -> None: + """Test that we assert values in the instance map are greater than the + min stream position""" + + with self.assertRaises(ValueError): + self.token_type(stream=5, instance_map=immutabledict({"foo": 4})) + + with self.assertRaises(ValueError): + self.token_type(stream=5, instance_map=immutabledict({"foo": 5})) + + def test_parse_bad_token(self) -> None: + """Test that we can parse tokens produced by a bug in Synapse of the + 
form `m5~`""" + store = self.hs.get_datastores().main + + parsed_token = self.get_success(self.token_type.parse(store, "m5~")) + self.assertEqual(parsed_token, self.token_type(stream=5)) From 899d33f2baf42727fb9b4c118a931d872690a81c Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 16 Jul 2024 01:52:39 -0700 Subject: [PATCH 040/332] Remove unnecessary call to resume producing in fake channel (#17449) This fell out of the authenticated media work - this bit of code masked a bug but does not break anything when removed, so probably should be removed. --- changelog.d/17449.bugfix | 1 + tests/server.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/17449.bugfix diff --git a/changelog.d/17449.bugfix b/changelog.d/17449.bugfix new file mode 100644 index 0000000000..cd847a3d1c --- /dev/null +++ b/changelog.d/17449.bugfix @@ -0,0 +1 @@ +Remove unnecessary call to resume producing in fake channel. \ No newline at end of file diff --git a/tests/server.py b/tests/server.py index f1cd0f76be..85602e6953 100644 --- a/tests/server.py +++ b/tests/server.py @@ -289,10 +289,6 @@ class FakeChannel: self._reactor.run() while not self.is_finished(): - # If there's a producer, tell it to resume producing so we get content - if self._producer: - self._producer.resumeProducing() - if self._reactor.seconds() > end_time: raise TimedOutException("Timed out waiting for request to finish.") From 9e1acea0512bc6dd6ceddcc44619c754e16c2f97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 10:06:05 +0100 Subject: [PATCH 041/332] Bump setuptools from 67.6.0 to 70.0.0 (#17448) --- poetry.lock | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 805089e79e..4afedfbe0b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2504,19 +2504,18 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] name = "setuptools" -version = "67.6.0" +version = "70.0.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, - {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs 
(>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "setuptools-rust" From 429ecb7564640b4d04588173e9e54bf53524aadf Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 16 Jul 2024 03:13:55 -0700 Subject: [PATCH 042/332] Handle remote download responses with `UNKNOWN_LENGTH` more gracefully (#17439) Prior to this PR, remote downloads which did not provide a `content-length` were decremented from the remote download ratelimiter at the max allowable size, leading to excessive ratelimiting - see https://github.com/element-hq/synapse/issues/17394. This PR adds a linearizer to limit concurrent remote downloads to 6 per IP address, and decrements remote downloads without a `content-length` from the ratelimiter *after* the download is complete and the response length is known. Also adds logic to ensure that responses with a known length respect the `max_download_size`. --- changelog.d/17439.bugfix | 1 + synapse/http/matrixfederationclient.py | 126 ++++++++++++++++--------- tests/media/test_media_storage.py | 49 ++++++++-- tests/rest/client/test_media.py | 50 +++++++++- 4 files changed, 166 insertions(+), 60 deletions(-) create mode 100644 changelog.d/17439.bugfix diff --git a/changelog.d/17439.bugfix b/changelog.d/17439.bugfix new file mode 100644 index 0000000000..f36c3ec255 --- /dev/null +++ b/changelog.d/17439.bugfix @@ -0,0 +1 @@ +Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. 
\ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 749b01dd0e..6fd75fd381 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -90,7 +90,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.types import JsonDict from synapse.util import json_decoder -from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred +from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred from synapse.util.metrics import Measure from synapse.util.stringutils import parse_and_validate_server_name @@ -475,6 +475,8 @@ class MatrixFederationHttpClient: use_proxy=True, ) + self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6) + def wake_destination(self, destination: str) -> None: """Called when the remote server may have come back online.""" @@ -1486,35 +1488,44 @@ class MatrixFederationHttpClient: ) headers = dict(response.headers.getAllRawHeaders()) - expected_size = response.length - # if we don't get an expected length then use the max length + if expected_size == UNKNOWN_LENGTH: expected_size = max_size - logger.debug( - f"File size unknown, assuming file is max allowable size: {max_size}" - ) + else: + if int(expected_size) > max_size: + msg = "Requested file is too large > %r bytes" % (max_size,) + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE) - read_body, _ = await download_ratelimiter.can_do_action( - requester=None, - key=ip_address, - n_actions=expected_size, - ) - if not read_body: - msg = "Requested file size exceeds ratelimits" - logger.warning( - "{%s} [%s] %s", - request.txn_id, - request.destination, - msg, + read_body, _ = await download_ratelimiter.can_do_action( + requester=None, + key=ip_address, + n_actions=expected_size, ) - raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED) + if not read_body: + msg = "Requested file size exceeds ratelimits" + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError( + HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED + ) try: - # add a byte of headroom to max size as function errs at >= - d = read_body_with_max_size(response, output_stream, expected_size + 1) - d.addTimeout(self.default_timeout_seconds, self.reactor) - length = await make_deferred_yieldable(d) + async with self.remote_download_linearizer.queue(ip_address): + # add a byte of headroom to max size as function errs at >= + d = read_body_with_max_size(response, output_stream, expected_size + 1) + d.addTimeout(self.default_timeout_seconds, self.reactor) + length = await make_deferred_yieldable(d) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (expected_size,) logger.warning( @@ -1560,6 +1571,13 @@ class MatrixFederationHttpClient: request.method, request.uri.decode("ascii"), ) + + # if we didn't know the length upfront, decrement the actual size from ratelimiter + if response.length == UNKNOWN_LENGTH: + download_ratelimiter.record_action( + requester=None, key=ip_address, n_actions=length + ) + return length, headers async def federation_get_file( @@ -1630,29 +1648,37 @@ class MatrixFederationHttpClient: ) headers = dict(response.headers.getAllRawHeaders()) - expected_size = 
response.length - # if we don't get an expected length then use the max length + if expected_size == UNKNOWN_LENGTH: expected_size = max_size - logger.debug( - f"File size unknown, assuming file is max allowable size: {max_size}" - ) + else: + if int(expected_size) > max_size: + msg = "Requested file is too large > %r bytes" % (max_size,) + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE) - read_body, _ = await download_ratelimiter.can_do_action( - requester=None, - key=ip_address, - n_actions=expected_size, - ) - if not read_body: - msg = "Requested file size exceeds ratelimits" - logger.warning( - "{%s} [%s] %s", - request.txn_id, - request.destination, - msg, + read_body, _ = await download_ratelimiter.can_do_action( + requester=None, + key=ip_address, + n_actions=expected_size, ) - raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED) + if not read_body: + msg = "Requested file size exceeds ratelimits" + logger.warning( + "{%s} [%s] %s", + request.txn_id, + request.destination, + msg, + ) + raise SynapseError( + HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED + ) # this should be a multipart/mixed response with the boundary string in the header try: @@ -1672,11 +1698,12 @@ class MatrixFederationHttpClient: raise SynapseError(HTTPStatus.BAD_GATEWAY, msg) try: - # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >= - deferred = read_multipart_response( - response, output_stream, boundary, expected_size + 1 - ) - deferred.addTimeout(self.default_timeout_seconds, self.reactor) + async with self.remote_download_linearizer.queue(ip_address): + # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >= + deferred = read_multipart_response( + response, output_stream, boundary, expected_size + 1 + ) + deferred.addTimeout(self.default_timeout_seconds, self.reactor) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (expected_size,) logger.warning( @@ -1743,6 +1770,13 @@ class MatrixFederationHttpClient: request.method, request.uri.decode("ascii"), ) + + # if we didn't know the length upfront, decrement the actual size from ratelimiter + if response.length == UNKNOWN_LENGTH: + download_ratelimiter.record_action( + requester=None, key=ip_address, n_actions=length + ) + return length, headers, multipart_response.json diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 70912e22f8..e55001fb40 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -1057,13 +1057,15 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel.code == 200 + @override_config({"remote_media_download_burst_count": "87M"}) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, ) - def test_download_ratelimit_max_size_sub(self) -> None: + def test_download_ratelimit_unknown_length(self) -> None: """ - Test that if no content-length is provided, the default max size is applied instead + Test that if no content-length is provided, ratelimit will still be applied after + download once length is known """ # mock out actually sending the request @@ -1077,19 +1079,48 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): self.client._send_request = _send_request # type: ignore - # ten requests should go through using the max size (500MB/50MB) - for i 
in range(10): - channel2 = self.make_request( + # 3 requests should go through (note 3rd one would technically violate ratelimit but + # is applied *after* download - the next one will be ratelimited) + for i in range(3): + channel = self.make_request( "GET", f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}", shorthand=False, ) - assert channel2.code == 200 + assert channel.code == 200 - # eleventh will hit ratelimit - channel3 = self.make_request( + # 4th will hit ratelimit + channel2 = self.make_request( "GET", "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx", shorthand=False, ) - assert channel3.code == 429 + assert channel2.code == 429 + + @override_config({"max_upload_size": "29M"}) + @patch( + "synapse.http.matrixfederationclient.read_body_with_max_size", + read_body_with_max_size_30MiB, + ) + def test_max_download_respected(self) -> None: + """ + Test that the max download size is enforced - note that max download size is determined + by the max_upload_size + """ + + # mock out actually sending the request + async def _send_request(*args: Any, **kwargs: Any) -> IResponse: + resp = MagicMock(spec=IResponse) + resp.code = 200 + resp.length = 31457280 + resp.headers = Headers({"Content-Type": ["application/octet-stream"]}) + resp.phrase = b"OK" + return resp + + self.client._send_request = _send_request # type: ignore + + channel = self.make_request( + "GET", "/_matrix/media/v3/download/remote.org/abcd", shorthand=False + ) + assert channel.code == 502 + assert channel.json_body["errcode"] == "M_TOO_LARGE" diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 7f2caed7d5..466c5a0b70 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -1809,13 +1809,19 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel.code == 200 + @override_config( + { + "remote_media_download_burst_count": "87M", + } + ) @patch( "synapse.http.matrixfederationclient.read_multipart_response", read_multipart_response_30MiB, ) - def test_download_ratelimit_max_size_sub(self) -> None: + def test_download_ratelimit_unknown_length(self) -> None: """ - Test that if no content-length is provided, the default max size is applied instead + Test that if no content-length is provided, ratelimiting is still applied after + media is downloaded and length is known """ # mock out actually sending the request @@ -1831,8 +1837,9 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): self.client._send_request = _send_request # type: ignore - # ten requests should go through using the max size (500MB/50MB) - for i in range(10): + # first 3 will go through (note that 3rd request technically violates rate limit but + # that since the ratelimiting is applied *after* download it goes through, but next one fails) + for i in range(3): channel2 = self.make_request( "GET", f"/_matrix/client/v1/media/download/remote.org/abc{i}", @@ -1841,7 +1848,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel2.code == 200 - # eleventh will hit ratelimit + # 4th will hit ratelimit channel3 = self.make_request( "GET", "/_matrix/client/v1/media/download/remote.org/abcd", @@ -1850,6 +1857,39 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel3.code == 429 + @override_config({"max_upload_size": "29M"}) + @patch( + "synapse.http.matrixfederationclient.read_multipart_response", + read_multipart_response_30MiB, + ) + def 
test_max_download_respected(self) -> None: + """ + Test that the max download size is enforced - note that max download size is determined + by the max_upload_size + """ + + # mock out actually sending the request, returns a 30MiB response + async def _send_request(*args: Any, **kwargs: Any) -> IResponse: + resp = MagicMock(spec=IResponse) + resp.code = 200 + resp.length = 31457280 + resp.headers = Headers( + {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]} + ) + resp.phrase = b"OK" + return resp + + self.client._send_request = _send_request # type: ignore + + channel = self.make_request( + "GET", + "/_matrix/client/v1/media/download/remote.org/abcd", + shorthand=False, + access_token=self.tok, + ) + assert channel.code == 502 + assert channel.json_body["errcode"] == "M_TOO_LARGE" + def test_file_download(self) -> None: content = io.BytesIO(b"file_to_stream") content_uri = self.get_success( From 83894180b2a4345d4b1c51d9268136b6167858e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 11:35:47 +0100 Subject: [PATCH 043/332] Bump matrix-org/done-action from 2 to 3 (#17440) --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 767495101b..730f8552d9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -305,7 +305,7 @@ jobs: - lint-readme runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v2 + - uses: matrix-org/done-action@v3 with: needs: ${{ toJSON(needs) }} @@ -737,7 +737,7 @@ jobs: - linting-done runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v2 + - uses: matrix-org/done-action@v3 with: needs: ${{ toJSON(needs) }} From 574aa53126c238148189f80b37b2ad14052cc429 Mon Sep 17 00:00:00 2001 From: Till Faelligen <2353100+S7evinK@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:55:26 +0200 Subject: [PATCH 044/332] 1.111.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8279960b5b..0a2b816ed1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.111.0 (2024-07-16) + +No significant changes since 1.111.0rc2. + + + + # Synapse 1.111.0rc2 (2024-07-10) ### Bugfixes diff --git a/debian/changelog b/debian/changelog index 0f3dcc64e6..0470e25f2d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.111.0) stable; urgency=medium + + * New Synapse release 1.111.0. + + -- Synapse Packaging team Tue, 16 Jul 2024 12:42:46 +0200 + matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium * New synapse release 1.111.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index 41de90f9f6..0f040fc612 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0rc2" +version = "1.111.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 79924aebefbcc2663f541efe083ae1fdbb10eab6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 12:08:06 +0100 Subject: [PATCH 045/332] Bump mypy from 1.9.0 to 1.10.1 (#17445) --- poetry.lock | 56 +++++++++++++-------------- tests/util/test_check_dependencies.py | 3 +- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4afedfbe0b..4092a41884 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1389,38 +1389,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = 
"mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, ] [package.dependencies] diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index fb67146c69..13a4e6ddaa 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -21,6 +21,7 @@ from contextlib import contextmanager from os import PathLike +from pathlib import Path from typing import Generator, Optional, Union from unittest.mock import patch @@ -41,7 +42,7 @@ class DummyDistribution(metadata.Distribution): def version(self) -> str: return self._version - def locate_file(self, path: Union[str, PathLike]) -> PathLike: + def locate_file(self, path: Union[str, PathLike]) -> Path: raise NotImplementedError() def read_text(self, filename: str) -> None: From 3fee32ed6be7eeb5845b0ce199ba29fa49fdd41e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Jul 2024 13:10:15 -0500 Subject: [PATCH 046/332] Order `heroes` by `stream_ordering` (as spec'ed) (#17435) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec specifically mentions `stream_ordering` but that's a Synapse specific concept. In any case, the essence of the spec is basically the first 5 members of the room which `stream_ordering` accomplishes. Split off from https://github.com/element-hq/synapse/pull/17419#discussion_r1671342794 ## Spec compliance > This should be the first 5 members of the room, **ordered by stream ordering**, which are joined or invited. The list must never include the client’s own user ID. When no joined or invited members are available, this should consist of the banned and left users. 
> > *-- https://spec.matrix.org/v1.10/client-server-api/#_matrixclientv3sync_roomsummary* Related to https://github.com/matrix-org/matrix-spec/issues/1334 --- changelog.d/17435.bugfix | 1 + synapse/storage/databases/main/roommember.py | 57 ++- tests/rest/client/test_sync.py | 21 +- tests/storage/test_roommember.py | 403 ++++++++++++++++++- 4 files changed, 456 insertions(+), 26 deletions(-) create mode 100644 changelog.d/17435.bugfix diff --git a/changelog.d/17435.bugfix b/changelog.d/17435.bugfix new file mode 100644 index 0000000000..2d06a7c7fc --- /dev/null +++ b/changelog.d/17435.bugfix @@ -0,0 +1 @@ +Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 5d2fd08495..f62d9f705d 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -279,8 +279,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): @cached(max_entries=100000) # type: ignore[synapse-@cached-mutable] async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]: - """Get the details of a room roughly suitable for use by the room + """ + Get the details of a room roughly suitable for use by the room summary extension to /sync. Useful when lazy loading room members. + + Returns the total count of members in the room by membership type, and a + truncated list of members (the heroes). This will be the first 6 members of the + room: + - We want 5 heroes plus 1, in case one of them is the + calling user. + - They are ordered by `stream_ordering` and are joined or + invited members. When no joined or invited members are available, this also includes + banned and left users. + Args: room_id: The room ID to query Returns: @@ -308,23 +319,36 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): for count, membership in txn: res.setdefault(membership, MemberSummary([], count)) - # we order by membership and then fairly arbitrarily by event_id so - # heroes are consistent - # Note, rejected events will have a null membership field, so - # we we manually filter them out. + # Order by membership (joins -> invites -> leave (former insiders) -> + # everything else (outsiders like bans/knocks)), then by `stream_ordering` so + # the first members in the room show up first and to make the sort stable + # (consistent heroes). + # + # Note: rejected events will have a null membership field, so we manually + # filter them out. sql = """ SELECT state_key, membership, event_id FROM current_state_events WHERE type = 'm.room.member' AND room_id = ? AND membership IS NOT NULL ORDER BY - CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, - event_id ASC + CASE membership WHEN ? THEN 1 WHEN ? THEN 2 WHEN ? THEN 3 ELSE 4 END ASC, + event_stream_ordering ASC LIMIT ? """ - # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. - txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) + txn.execute( + sql, + ( + room_id, + # Sort order + Membership.JOIN, + Membership.INVITE, + Membership.LEAVE, + # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. 
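+ # (extract_heroes_from_room_summary later trims the list back to at + # most 5 heroes once the calling user has been filtered out.) 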
+ 6, + ), + ) for user_id, membership, event_id in txn: summary = res[membership] # we will always have a summary for this membership type at this @@ -1509,10 +1533,19 @@ def extract_heroes_from_room_summary( ) -> List[str]: """Determine the users that represent a room, from the perspective of the `me` user. + This function expects `MemberSummary.members` to already be sorted by + `stream_ordering` like the results from `get_room_summary(...)`. + The rules which say which users we select are specified in the "Room Summary" section of https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync + + Args: + details: Mapping from membership type to member summary. We expect + `MemberSummary.members` to already be sorted by `stream_ordering`. + me: The user for whom we are determining the heroes for. + Returns a list (possibly empty) of heroes' mxids. """ empty_ms = MemberSummary([], 0) @@ -1527,11 +1560,11 @@ def extract_heroes_from_room_summary( r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me] - # FIXME: order by stream ordering rather than as returned by SQL + # We expect `MemberSummary.members` to already be sorted by `stream_ordering` if joined_user_ids or invited_user_ids: - return sorted(joined_user_ids + invited_user_ids)[0:5] + return (joined_user_ids + invited_user_ids)[0:5] else: - return sorted(gone_user_ids)[0:5] + return gone_user_ids[0:5] @attr.s(slots=True, auto_attribs=True) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index f5d57e689c..a008ee465b 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -2160,18 +2160,15 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Room2 doesn't have a name so we should see `heroes` populated self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) - # FIXME: Remove this basic assertion and uncomment the better assertion below - # after https://github.com/element-hq/synapse/pull/17435 merges - self.assertEqual(len(channel.json_body["rooms"][room_id1].get("heroes", [])), 5) - # self.assertCountEqual( - # [ - # hero["user_id"] - # for hero in channel.json_body["rooms"][room_id1].get("heroes", []) - # ], - # # Heroes should be the first 5 users in the room (excluding the user - # # themselves, we shouldn't see `user1`) - # [user2_id, user3_id, user4_id, user5_id, user6_id], - # ) + self.assertCountEqual( + [ + hero["user_id"] + for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + ], + # Heroes should be the first 5 users in the room (excluding the user + # themselves, we shouldn't see `user1`) + [user2_id, user3_id, user4_id, user5_id, user6_id], + ) self.assertEqual( channel.json_body["rooms"][room_id1]["joined_count"], 7, diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 882f3bbbdc..418b556108 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,20 +19,28 @@ # [This file includes modifications made by New Vector Limited] # # +import logging from typing import List, Optional, Tuple, cast from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import Membership +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.room_versions import RoomVersions +from synapse.rest import admin from synapse.rest.admin import register_servlets_for_client_rest_resource -from synapse.rest.client import login, room 
+from synapse.rest.client import knock, login, room from synapse.server import HomeServer +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.roommember import MemberSummary from synapse.types import UserID, create_requester from synapse.util import Clock from tests import unittest from tests.server import TestHomeServer from tests.test_utils import event_injection +from tests.unittest import skip_unless + +logger = logging.getLogger(__name__) class RoomMemberStoreTestCase(unittest.HomeserverTestCase): @@ -240,6 +248,397 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ) +class RoomSummaryTestCase(unittest.HomeserverTestCase): + """ + Test `/sync` room summary related logic like `get_room_summary(...)` and + `extract_heroes_from_room_summary(...)` + """ + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sliding_sync_handler = self.hs.get_sliding_sync_handler() + self.store = self.hs.get_datastores().main + + def _assert_member_summary( + self, + actual_member_summary: MemberSummary, + expected_member_list: List[str], + *, + expected_member_count: Optional[int] = None, + ) -> None: + """ + Assert that the `MemberSummary` object has the expected members. + """ + self.assertListEqual( + [ + user_id + for user_id, _membership_event_id in actual_member_summary.members + ], + expected_member_list, + ) + self.assertEqual( + actual_member_summary.count, + ( + expected_member_count + if expected_member_count is not None + else len(expected_member_list) + ), + ) + + def test_get_room_summary_membership(self) -> None: + """ + Test that `get_room_summary(...)` gets every kind of membership when there + aren't that many members in the room. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + + # Setup a room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # User2 is banned + self.helper.join(room_id, user2_id, tok=user2_tok) + self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok) + + # User3 is invited by user1 + self.helper.invite(room_id, targ=user3_id, tok=user1_tok) + + # User4 leaves + self.helper.join(room_id, user4_id, tok=user4_tok) + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # User5 joins + self.helper.join(room_id, user5_id, tok=user5_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + empty_ms = MemberSummary([], 0) + + self._assert_member_summary( + room_membership_summary.get(Membership.JOIN, empty_ms), + [user1_id, user5_id], + ) + self._assert_member_summary( + room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.BAN, empty_ms), [user2_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.KNOCK, empty_ms), + [ + # No one knocked + ], + ) + + def test_get_room_summary_membership_order(self) -> None: + """ + Test that `get_room_summary(...)` stacks our limit of 6 in this order: joins -> + invites -> leave -> everything else (bans/knocks) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + user6_id = self.register_user("user6", "pass") + user6_tok = self.login(user6_id, "pass") + user7_id = self.register_user("user7", "pass") + user7_tok = self.login(user7_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # We expect the order to be joins -> invites -> leave -> bans so setup the users + # *NOT* in that same order to make sure we're actually sorting them. 
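+ # (Expected outcome: the four joins, the invite and the leave fill all six + # summary rows, so the banned user should only appear in the membership counts.) 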
+ + # User2 is banned + self.helper.join(room_id, user2_id, tok=user2_tok) + self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok) + + # User3 is invited by user1 + self.helper.invite(room_id, targ=user3_id, tok=user1_tok) + + # User4 leaves + self.helper.join(room_id, user4_id, tok=user4_tok) + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # User5, User6, User7 joins + self.helper.join(room_id, user5_id, tok=user5_tok) + self.helper.join(room_id, user6_id, tok=user6_tok) + self.helper.join(room_id, user7_id, tok=user7_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + empty_ms = MemberSummary([], 0) + + self._assert_member_summary( + room_membership_summary.get(Membership.JOIN, empty_ms), + [user1_id, user5_id, user6_id, user7_id], + ) + self._assert_member_summary( + room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id] + ) + self._assert_member_summary( + room_membership_summary.get(Membership.BAN, empty_ms), + [ + # The banned user is not in the summary because the summary can only fit + # 6 members and prefers everything else before bans + # + # user2_id + ], + # But we still see the count of banned users + expected_member_count=1, + ) + self._assert_member_summary( + room_membership_summary.get(Membership.KNOCK, empty_ms), + [ + # No one knocked + ], + ) + + def test_extract_heroes_from_room_summary_excludes_self(self) -> None: + """ + Test that `extract_heroes_from_room_summary(...)` does not include the user + itself. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Setup the room (user1 is the creator and is joined to the room) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # User2 joins + self.helper.join(room_id, user2_id, tok=user2_tok) + + room_membership_summary = self.get_success(self.store.get_room_summary(room_id)) + + # We first ask from the perspective of a random fake user + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me="@fakeuser" + ) + + # Make sure user1 is in the room (ensure our test setup is correct) + self.assertListEqual(hero_user_ids, [user1_id, user2_id]) + + # Now, we ask for the room summary from the perspective of user1 + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me=user1_id + ) + + # User1 should not be included in the list of heroes because they are the one + # asking + self.assertListEqual(hero_user_ids, [user2_id]) + + def test_extract_heroes_from_room_summary_first_five_joins(self) -> None: + """ + Test that `extract_heroes_from_room_summary(...)` returns the first 5 joins. 
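+        (The spec caps heroes at five users, so with seven joined members only
+        the five earliest joins should come back.)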
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+        user6_id = self.register_user("user6", "pass")
+        user6_tok = self.login(user6_id, "pass")
+        user7_id = self.register_user("user7", "pass")
+        user7_tok = self.login(user7_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 -> User7 joins
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+        self.helper.join(room_id, user6_id, tok=user6_tok)
+        self.helper.join(room_id, user7_id, tok=user7_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # First 5 users to join the room
+        self.assertListEqual(
+            hero_user_ids, [user1_id, user2_id, user3_id, user4_id, user5_id]
+        )
+
+    def test_extract_heroes_from_room_summary_membership_order(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` prefers joins/invites over
+        everything else.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # We expect the order to be joins -> invites -> leave -> bans so setup the users
+        # *NOT* in that same order to make sure we're actually sorting them.
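+        # (Leave and ban only ever surface as a fallback when a room has no
+        # joins or invites at all - see the fallback test below.)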
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 is invited by user1
+        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
+
+        # User4 leaves
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # User5 joins
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Prefer joins -> invites, over everything else
+        self.assertListEqual(
+            hero_user_ids,
+            [
+                # The joins
+                user1_id,
+                user5_id,
+                # The invites
+                user3_id,
+            ],
+        )
+
+    @skip_unless(
+        False,
+        "Test is not possible because when everyone leaves the room, "
+        + "the server is `no_longer_in_room` and we don't have any `current_state_events` to query",
+    )
+    def test_extract_heroes_from_room_summary_fallback_leave_ban(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` falls back to leave/ban if
+        there aren't any joins/invites.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 leaves
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.leave(room_id, user3_id, tok=user3_tok)
+
+        # User1 leaves (we're doing this last because they're the room creator)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Fallback to people who left -> banned
+        self.assertListEqual(
+            hero_user_ids,
+            [user3_id, user1_id, user2_id],
+        )
+
+    def test_extract_heroes_from_room_summary_excludes_knocks(self) -> None:
+        """
+        People who knock on the room have (potentially) never been in the room before
+        and are total outsiders. Plus the spec doesn't mention them at all for heroes.
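+        (Heroes feed into the calculated room name and avatar, so a knocking
+        outsider should never be surfaced there.)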
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Setup the knock room (user1 is the creator and is joined to the room) + knock_room_id = self.helper.create_room_as( + user1_id, tok=user1_tok, room_version=RoomVersions.V7.identifier + ) + self.helper.send_state( + knock_room_id, + EventTypes.JoinRules, + {"join_rule": JoinRules.KNOCK}, + tok=user1_tok, + ) + + # User2 knocks on the room + knock_channel = self.make_request( + "POST", + "/_matrix/client/r0/knock/%s" % (knock_room_id,), + b"{}", + user2_tok, + ) + self.assertEqual(knock_channel.code, 200, knock_channel.result) + + room_membership_summary = self.get_success( + self.store.get_room_summary(knock_room_id) + ) + + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me="@fakeuser" + ) + + # user1 is the creator and is joined to the room (should show up as a hero) + # user2 is knocking on the room (should not show up as a hero) + self.assertListEqual( + hero_user_ids, + [user1_id], + ) + + class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main From a574de006286352e150b397f34fecc0d83e68148 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 Jul 2024 06:49:53 -0500 Subject: [PATCH 047/332] Add `m.room.create` to default bump event types (#17453) Add `m.room.create` to default bump event types This probably helps when no messages have been sent in the room and it was just created. --- changelog.d/17453.misc | 1 + synapse/handlers/sliding_sync.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/17453.misc diff --git a/changelog.d/17453.misc b/changelog.d/17453.misc new file mode 100644 index 0000000000..2978a52477 --- /dev/null +++ b/changelog.d/17453.misc @@ -0,0 +1 @@ +Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 1b5262d667..a23a6b9dd9 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -53,6 +53,7 @@ logger = logging.getLogger(__name__) # The event types that clients should consider as new activity. 
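 # The most recent event of one of these types determines a room's "bump stamp"
 # (its position when clients sort rooms by recency), so counting the create
 # event gives a just-created room a sensible position before any message is sent.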
 DEFAULT_BUMP_EVENT_TYPES = {
+    EventTypes.Create,
     EventTypes.Message,
     EventTypes.Encrypted,
     EventTypes.Sticker,

From f583f1dce409210c08cbdd7fc581b1c8e47dffb9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 18 Jul 2024 12:59:53 +0100
Subject: [PATCH 048/332] Revert "Bump setuptools from 67.6.0 to 70.0.0" (#17455)

Reverts element-hq/synapse#17448

We hit a bug when deploying with synctl:

```
Traceback (most recent call last):
  File "/home/synapse/env-python311/bin/synctl", line 33, in <module>
    sys.exit(load_entry_point('matrix-synapse', 'console_scripts', 'synctl')())
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/bin/synctl", line 25, in importlib_load_entry_point
    return next(matches).load()
           ^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.11/importlib/metadata/__init__.py", line 202, in load
    module = import_module(match.group('module'))
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.11/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 940, in exec_module
  File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
  File "/home/synapse/src/synapse/_scripts/synctl.py", line 37, in <module>
    from synapse.config import find_config_files
  File "/home/synapse/src/synapse/config/__init__.py", line 22, in <module>
    from ._base import ConfigError, find_config_files
  File "/home/synapse/src/synapse/config/_base.py", line 49, in <module>
    import pkg_resources
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3282, in <module>
    @_call_aside
     ^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3266, in _call_aside
    f(*args, **kwargs)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3295, in _initialize_master_working_set
    working_set = _declare_state('object', 'working_set', WorkingSet._build_master())
                                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 589, in _build_master
    ws.require(__requires__)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 926, in require
    needed = self.resolve(parse_requirements(requirements))
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 787, in resolve
    dist = self._resolve_dist(
           ^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 816, in _resolve_dist
    env = Environment(self.entries)
          ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1014, in __init__
    self.scan(search_path)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1046, in scan
    for dist in find_distributions(item):
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2091, in find_on_path
    yield from factory(fullpath)
               ^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2183, in resolve_egg_link
    return next(dist_groups, ())
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2179, in <genexpr>
    resolved_paths = (
                     ^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2167, in non_empty_lines
    for line in _read_utf8_with_fallback(path).splitlines():
                ^^^^^^^^^^^^^^^^^^^^^^^^
NameError: name '_read_utf8_with_fallback' is not defined
```
---
 poetry.lock | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 4092a41884..b3e45972f1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2504,18 +2504,19 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]

 [[package]]
 name = "setuptools"
-version = "70.0.0"
+version = "67.6.0"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
 files = [
-    {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"},
-    {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"},
+    {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
+    {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
 ]

 [package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]

 [[package]]
 name = "setuptools-rust"

From 6a01af59e14b67960e290c26131249fb8c833293 Mon Sep 17 00:00:00 2001
From: Ben Banfield-Zanin
Date: Thu, 18 Jul 2024 13:32:32 +0100
Subject: [PATCH 049/332] Improve default_power_level_content_override
 documentation (#17451)

Co-authored-by: Andrew Morgan
<1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17451.doc | 1 + .../configuration/config_documentation.md | 32 +++++++++++++++++++ synapse/handlers/room.py | 2 ++ 3 files changed, 35 insertions(+) create mode 100644 changelog.d/17451.doc diff --git a/changelog.d/17451.doc b/changelog.d/17451.doc new file mode 100644 index 0000000000..357ac2c906 --- /dev/null +++ b/changelog.d/17451.doc @@ -0,0 +1 @@ +Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 65b03ad0f8..38b24b5044 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4134,6 +4134,38 @@ default_power_level_content_override: trusted_private_chat: null public_chat: null ``` + +The default power levels for each preset are: +```yaml +"m.room.name": 50 +"m.room.power_levels": 100 +"m.room.history_visibility": 100 +"m.room.canonical_alias": 50 +"m.room.avatar": 50 +"m.room.tombstone": 100 +"m.room.server_acl": 100 +"m.room.encryption": 100 +``` + +So a complete example where the default power-levels for a preset are maintained +but the power level for a new key is set is: +```yaml +default_power_level_content_override: + private_chat: + events: + "com.example.foo": 0 + "m.room.name": 50 + "m.room.power_levels": 100 + "m.room.history_visibility": 100 + "m.room.canonical_alias": 50 + "m.room.avatar": 50 + "m.room.tombstone": 100 + "m.room.server_acl": 100 + "m.room.encryption": 100 + trusted_private_chat: null + public_chat: null +``` + --- ### `forget_rooms_on_leave` diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2302d283a7..262d9f4044 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1188,6 +1188,8 @@ class RoomCreationHandler: ) events_to_send.append((power_event, power_context)) else: + # Please update the docs for `default_power_level_content_override` when + # updating the `events` dict below power_level_content: JsonDict = { "users": {creator_id: 100}, "users_default": 0, From 71d83477cb494db7f8384eedf127e59c8c8cd479 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:02:38 +0100 Subject: [PATCH 050/332] Bump sentry-sdk from 2.6.0 to 2.8.0 (#17456) --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index b3e45972f1..2bfcb59cf2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2430,13 +2430,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.6.0" +version = "2.8.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"}, - {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"}, + {file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"}, + {file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"}, ] [package.dependencies] @@ -2466,7 +2466,7 @@ langchain = ["langchain 
(>=0.0.210)"] loguru = ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] -opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging (==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] @@ -2476,7 +2476,7 @@ sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] starlette = ["starlette (>=0.19.1)"] starlite = ["starlite (>=1.48)"] -tornado = ["tornado (>=5)"] +tornado = ["tornado (>=6)"] [[package]] name = "service-identity" From 43c865f7c98a1e31d6eb8d3979d1e199fadcb950 Mon Sep 17 
00:00:00 2001 From: Erik Johnston Date: Fri, 19 Jul 2024 12:09:39 +0100 Subject: [PATCH 051/332] Generate room sync data concurrently (#17458) This is also what we do for standard `/sync`. --- changelog.d/17458.misc | 1 + synapse/handlers/sliding_sync.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17458.misc diff --git a/changelog.d/17458.misc b/changelog.d/17458.misc new file mode 100644 index 0000000000..09cce15d0d --- /dev/null +++ b/changelog.d/17458.misc @@ -0,0 +1 @@ +Speed up generating sliding sync responses. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index a23a6b9dd9..423f0329d6 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -28,6 +28,7 @@ from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membe from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations +from synapse.logging.opentracing import start_active_span, tag_args, trace from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary @@ -43,6 +44,7 @@ from synapse.types import ( ) from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult from synapse.types.state import StateFilter +from synapse.util.async_helpers import concurrently_execute from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -592,11 +594,14 @@ class SlidingSyncHandler: # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} - for room_id, room_sync_config in relevant_room_map.items(): + + @trace + @tag_args + async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( user=sync_config.user, room_id=room_id, - room_sync_config=room_sync_config, + room_sync_config=relevant_room_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ room_id ], @@ -606,6 +611,9 @@ class SlidingSyncHandler: rooms[room_id] = room_sync_result + with start_active_span("sliding_sync.generate_room_entries"): + await concurrently_execute(handle_room, relevant_room_map, 10) + extensions = await self.get_extensions_response( sync_config=sync_config, to_token=to_token ) From d3f9afd8d9db8c80b342177b9ab162c79357c431 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Jul 2024 16:19:15 +0100 Subject: [PATCH 052/332] Add a cache on `get_rooms_for_local_user_where_membership_is` (#17460) As it gets used in sliding sync. We basically invalidate it in all the same places as `get_rooms_for_user`. Most of the changes are due to needing the arguments you pass in to be hashable (which lists aren't) --- changelog.d/17460.misc | 1 + synapse/api/constants.py | 2 +- synapse/storage/_base.py | 6 +++++ synapse/storage/databases/main/cache.py | 6 +++++ synapse/storage/databases/main/roommember.py | 26 +++++++++++++++++--- tests/handlers/test_sync.py | 1 + 6 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17460.misc diff --git a/changelog.d/17460.misc b/changelog.d/17460.misc new file mode 100644 index 0000000000..fd99da5a95 --- /dev/null +++ b/changelog.d/17460.misc @@ -0,0 +1 @@ +Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. 
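The hashability requirement called out above is the crux of this patch: Synapse's `@cached` decorator keys cache entries on the call arguments, so passing a `list` raises `TypeError: unhashable type: 'list'`. Below is a minimal sketch of the same pattern, with `functools.lru_cache` standing in for Synapse's richer `@cached` machinery (the names are illustrative, not Synapse's actual API):

```python
from functools import lru_cache
from typing import FrozenSet, Tuple


@lru_cache(maxsize=1000)
def _rooms_where_membership_is_inner(
    user_id: str, membership_list: FrozenSet[str]
) -> Tuple[str, ...]:
    # Stand-in for the expensive database lookup; results are cached on
    # (user_id, membership_list). A frozenset key is hashable and
    # order-insensitive, so equivalent calls share one cache entry.
    return tuple(f"!{membership}-room:test" for membership in sorted(membership_list))


def rooms_where_membership_is(
    user_id: str, memberships: Tuple[str, ...]
) -> Tuple[str, ...]:
    # Normalise the caller's collection into a frozenset before hitting the
    # cached inner function - the same conversion the patch performs before
    # calling `_get_rooms_for_local_user_where_membership_is_inner`.
    return _rooms_where_membership_is_inner(user_id, frozenset(memberships))


# The same memberships in a different order hit the same cache entry:
assert rooms_where_membership_is("@alice:test", ("join", "invite")) == rooms_where_membership_is(
    "@alice:test", ("invite", "join")
)
```

The cached result also has to be dropped whenever a membership changes, which is why the `_attempt_to_invalidate_cache` call sites in `_base.py` and `cache.py` below are updated in lockstep with `get_rooms_for_user`.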
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 12d18137e0..85001d9676 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -50,7 +50,7 @@ class Membership: KNOCK: Final = "knock" LEAVE: Final = "leave" BAN: Final = "ban" - LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN} + LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN)) class PresenceState: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 881888fa93..066f3d08ae 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -120,6 +120,9 @@ class SQLBaseStore(metaclass=ABCMeta): "get_user_in_room_with_profile", (room_id, user_id) ) self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,)) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", (user_id,) + ) # Purge other caches based on room state. self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) @@ -146,6 +149,9 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", None + ) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) def _attempt_to_invalidate_cache( diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2d6b75e47e..26b8e1a172 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -331,6 +331,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_invited_rooms_for_local_user", (state_key,) ) self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,)) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", (state_key,) + ) self._attempt_to_invalidate_cache( "did_forget", @@ -393,6 +396,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None) self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache( + "_get_rooms_for_local_user_where_membership_is_inner", None + ) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("get_references_for_event", None) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index f62d9f705d..640ab123f0 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -445,9 +445,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): if not membership_list: return [] - rooms = await self.db_pool.runInteraction( - "get_rooms_for_local_user_where_membership_is", - self._get_rooms_for_local_user_where_membership_is_txn, + # Convert membership list to frozen set as a) it needs to be hashable, + # and b) we don't care about the order. 
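+        # (`Membership.LIST` is itself a frozenset after the constants change
+        # above, so the most common caller's conversion here is effectively free.)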
+ membership_list = frozenset(membership_list) + + rooms = await self._get_rooms_for_local_user_where_membership_is_inner( user_id, membership_list, ) @@ -466,6 +468,24 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return [room for room in rooms if room.room_id not in rooms_to_exclude] + @cached(max_entries=1000, tree=True) + async def _get_rooms_for_local_user_where_membership_is_inner( + self, + user_id: str, + membership_list: Collection[str], + ) -> Sequence[RoomsForUser]: + if not membership_list: + return [] + + rooms = await self.db_pool.runInteraction( + "get_rooms_for_local_user_where_membership_is", + self._get_rooms_for_local_user_where_membership_is_txn, + user_id, + membership_list, + ) + + return rooms + def _get_rooms_for_local_user_where_membership_is_txn( self, txn: LoggingTransaction, diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 77aafa492e..fa55f76916 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -211,6 +211,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user.invalidate_all() + self.store._get_rooms_for_local_user_where_membership_is_inner.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() From dc8ddc6472ba19905b3fd0c4f4da4088223e03b0 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 22 Jul 2024 02:33:17 -0700 Subject: [PATCH 053/332] Prepare for authenticated media freeze (#17433) As part of the rollout of [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) this PR adds support for designating authenticated media and ensuring that authenticated media is not served over unauthenticated endpoints. --- changelog.d/17433.feature | 1 + .../configuration/config_documentation.md | 12 + synapse/_scripts/synapse_port_db.py | 5 +- synapse/config/repository.py | 4 + synapse/media/media_repository.py | 39 +++- synapse/media/thumbnailer.py | 48 +++- synapse/rest/media/download_resource.py | 3 +- synapse/rest/media/thumbnail_resource.py | 5 +- .../databases/main/media_repository.py | 28 ++- synapse/storage/schema/__init__.py | 5 +- .../main/delta/86/01_authenticate_media.sql | 15 ++ tests/rest/client/test_media.py | 209 ++++++++++++++++++ 12 files changed, 362 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17433.feature create mode 100644 synapse/storage/schema/main/delta/86/01_authenticate_media.sql diff --git a/changelog.d/17433.feature b/changelog.d/17433.feature new file mode 100644 index 0000000000..ac9b5dee69 --- /dev/null +++ b/changelog.d/17433.feature @@ -0,0 +1 @@ +Prepare for authenticated media freeze. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 38b24b5044..e8bc2df798 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1863,6 +1863,18 @@ federation_rr_transactions_per_room_per_second: 40 ## Media Store Config options related to Synapse's media store. 
+---
+### `enable_authenticated_media`
+
+When set to true, all subsequent media uploads will be marked as authenticated and will not be
+available over the legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download`
+and `/_matrix/media/(r0|v3|v1)/thumbnail`); requests for authenticated media over these
+endpoints will receive a 404. All media, including authenticated media, will be available over
+the authenticated endpoints `_matrix/client/v1/media/download` and
+`_matrix/client/v1/media/thumbnail`. Media uploaded before this option was set to true will
+still be available over the legacy endpoints. Note that if the setting is later switched back
+to false, media marked as authenticated becomes available over the legacy endpoints again.
+Defaults to false, but this will change to true in a future Synapse release.
+
+Example configuration:
+```yaml
+enable_authenticated_media: true
+```
 ---
 ### `enable_media_repo`

diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 3bb4a34938..5c6db8118f 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
     "e2e_room_keys": ["is_verified"],
     "event_edges": ["is_state"],
     "events": ["processed", "outlier", "contains_url"],
-    "local_media_repository": ["safe_from_quarantine"],
+    "local_media_repository": ["safe_from_quarantine", "authenticated"],
+    "per_user_experimental_features": ["enabled"],
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
     "public_room_list_stream": ["visibility"],
     "pushers": ["enabled"],
     "redactions": ["have_censored"],
+    "remote_media_cache": ["authenticated"],
     "room_stats_state": ["is_federatable"],
     "rooms": ["is_public", "has_auth_chain_index"],
     "users": ["shadow_banned", "approved", "locked", "suspended"],
     "un_partial_stated_event_stream": ["rejection_status_changed"],
     "users_who_share_rooms": ["share_private"],
-    "per_user_experimental_features": ["enabled"],
 }

diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index dc0e93ffa1..97ce6de528 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
             remote_media_lifetime
         )

+        self.enable_authenticated_media = config.get(
+            "enable_authenticated_media", False
+        )
+
     def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
         assert data_dir_path is not None
         media_store = os.path.join(data_dir_path, "media_store")

diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 87c929eb20..8bc92305fe 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -430,6 +430,7 @@ class MediaRepository:
         media_id: str,
         name: Optional[str],
         max_timeout_ms: int,
+        allow_authenticated: bool = True,
         federation: bool = False,
     ) -> None:
         """Responds to requests for local media, if exists, or returns 404.
@@ -442,6 +443,7 @@
                 the filename in the Content-Disposition header of the response.
             max_timeout_ms: the maximum number of milliseconds to wait for the media to be
                 uploaded.
+ allow_authenticated: whether media marked as authenticated may be served to this request federation: whether the local media being fetched is for a federation request Returns: @@ -451,6 +453,10 @@ class MediaRepository: if not media_info: return + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + self.mark_recently_accessed(None, media_id) media_type = media_info.media_type @@ -481,6 +487,7 @@ class MediaRepository: max_timeout_ms: int, ip_address: str, use_federation_endpoint: bool, + allow_authenticated: bool = True, ) -> None: """Respond to requests for remote media. @@ -495,6 +502,8 @@ class MediaRepository: ip_address: the IP address of the requester use_federation_endpoint: whether to request the remote media over the new federation `/download` endpoint + allow_authenticated: whether media marked as authenticated may be served to this + request Returns: Resolves once a response has successfully been written to request @@ -526,6 +535,7 @@ class MediaRepository: self.download_ratelimiter, ip_address, use_federation_endpoint, + allow_authenticated, ) # We deliberately stream the file outside the lock @@ -548,6 +558,7 @@ class MediaRepository: max_timeout_ms: int, ip_address: str, use_federation: bool, + allow_authenticated: bool, ) -> RemoteMedia: """Gets the media info associated with the remote file, downloading if necessary. @@ -560,6 +571,8 @@ class MediaRepository: ip_address: IP address of the requester use_federation: if a download is necessary, whether to request the remote file over the federation `/download` endpoint + allow_authenticated: whether media marked as authenticated may be served to this + request Returns: The media info of the file @@ -581,6 +594,7 @@ class MediaRepository: self.download_ratelimiter, ip_address, use_federation, + allow_authenticated, ) # Ensure we actually use the responder so that it releases resources @@ -598,6 +612,7 @@ class MediaRepository: download_ratelimiter: Ratelimiter, ip_address: str, use_federation_endpoint: bool, + allow_authenticated: bool, ) -> Tuple[Optional[Responder], RemoteMedia]: """Looks for media in local cache, if not there then attempt to download from remote server. @@ -619,6 +634,11 @@ class MediaRepository: """ media_info = await self.store.get_cached_remote_media(server_name, media_id) + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + # if it isn't cached then don't fetch it or if it's authenticated then don't serve it + if not media_info or media_info.authenticated: + raise NotFoundError() + # file_id is the ID we use to track the file locally. If we've already # seen the file then reuse the existing ID, otherwise generate a new # one. 
@@ -792,6 +812,11 @@ class MediaRepository: logger.info("Stored remote media in file %r", fname) + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + return RemoteMedia( media_origin=server_name, media_id=media_id, @@ -802,6 +827,7 @@ class MediaRepository: filesystem_id=file_id, last_access_ts=time_now_ms, quarantined_by=None, + authenticated=authenticated, ) async def _federation_download_remote_file( @@ -915,6 +941,11 @@ class MediaRepository: logger.debug("Stored remote media in file %r", fname) + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + return RemoteMedia( media_origin=server_name, media_id=media_id, @@ -925,6 +956,7 @@ class MediaRepository: filesystem_id=file_id, last_access_ts=time_now_ms, quarantined_by=None, + authenticated=authenticated, ) def _get_thumbnail_requirements( @@ -1030,7 +1062,12 @@ class MediaRepository: t_len = os.path.getsize(output_path) await self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, t_len + media_id, + t_width, + t_height, + t_type, + t_method, + t_len, ) return output_path diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 413a720e40..ef6aa8ccf5 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type from PIL import Image -from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP from synapse.http.server import respond_with_json from synapse.http.site import SynapseRequest @@ -274,6 +274,7 @@ class ThumbnailProvider: m_type: str, max_timeout_ms: int, for_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms @@ -281,6 +282,12 @@ class ThumbnailProvider: if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) await self._select_and_respond_with_thumbnail( request, @@ -307,14 +314,20 @@ class ThumbnailProvider: desired_type: str, max_timeout_ms: int, for_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_local_media_info( request, media_id, max_timeout_ms ) - if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: t_w = info.width == desired_width @@ -381,14 +394,27 @@ class ThumbnailProvider: max_timeout_ms: int, ip_address: str, use_federation: bool, + allow_authenticated: bool = True, ) -> None: media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address, use_federation + server_name, + media_id, + max_timeout_ms, + ip_address, + use_federation, + allow_authenticated, ) if not 
media_info: respond_404(request) return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + respond_404(request) + return + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -446,16 +472,28 @@ class ThumbnailProvider: max_timeout_ms: int, ip_address: str, use_federation: bool, + allow_authenticated: bool = True, ) -> None: # TODO: Don't download the whole remote file # We should proxy the thumbnail from the remote server instead of # downloading the remote file and generating our own thumbnails. media_info = await self.media_repo.get_remote_media_info( - server_name, media_id, max_timeout_ms, ip_address, use_federation + server_name, + media_id, + max_timeout_ms, + ip_address, + use_federation, + allow_authenticated, ) if not media_info: return + # if the media the thumbnail is generated from is authenticated, don't serve the + # thumbnail over an unauthenticated endpoint + if self.hs.config.media.enable_authenticated_media and not allow_authenticated: + if media_info.authenticated: + raise NotFoundError() + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -485,8 +523,8 @@ class ThumbnailProvider: file_id: str, url_cache: bool, for_federation: bool, - server_name: Optional[str] = None, media_info: Optional[LocalMedia] = None, + server_name: Optional[str] = None, ) -> None: """ Respond to a request with an appropriate thumbnail from the previously generated thumbnails. diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index c32c626905..3c3f703667 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -84,7 +84,7 @@ class DownloadResource(RestServlet): if self._is_mine_server_name(server_name): await self.media_repo.get_local_media( - request, media_id, file_name, max_timeout_ms + request, media_id, file_name, max_timeout_ms, allow_authenticated=False ) else: allow_remote = parse_boolean(request, "allow_remote", default=True) @@ -106,4 +106,5 @@ class DownloadResource(RestServlet): max_timeout_ms, ip_address, False, + allow_authenticated=False, ) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 70354aa439..536fea4c32 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -96,6 +96,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, False, + allow_authenticated=False, ) else: await self.thumbnail_provider.respond_local_thumbnail( @@ -107,6 +108,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, False, + allow_authenticated=False, ) self.media_repo.mark_recently_accessed(None, media_id) else: @@ -134,6 +136,7 @@ class ThumbnailResource(RestServlet): m_type, max_timeout_ms, ip_address, - False, + use_federation=False, + allow_authenticated=False, ) self.media_repo.mark_recently_accessed(server_name, media_id) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 6128332af8..7617fd3ad4 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -64,6 +64,7 @@ class LocalMedia: quarantined_by: Optional[str] safe_from_quarantine: bool user_id: Optional[str] + authenticated: 
Optional[bool] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -77,6 +78,7 @@ class RemoteMedia: created_ts: int last_access_ts: int quarantined_by: Optional[str] + authenticated: Optional[bool] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -218,6 +220,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "last_access_ts", "safe_from_quarantine", "user_id", + "authenticated", ), allow_none=True, desc="get_local_media", @@ -235,6 +238,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): last_access_ts=row[6], safe_from_quarantine=row[7], user_id=row[8], + authenticated=row[9], ) async def get_local_media_by_user_paginate( @@ -290,7 +294,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): last_access_ts, quarantined_by, safe_from_quarantine, - user_id + user_id, + authenticated FROM local_media_repository WHERE user_id = ? ORDER BY {order_by_column} {order}, media_id ASC @@ -314,6 +319,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): quarantined_by=row[7], safe_from_quarantine=bool(row[8]), user_id=row[9], + authenticated=row[10], ) for row in txn ] @@ -417,12 +423,18 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): time_now_ms: int, user_id: UserID, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "local_media_repository", { "media_id": media_id, "created_ts": time_now_ms, "user_id": user_id.to_string(), + "authenticated": authenticated, }, desc="store_local_media_id", ) @@ -438,6 +450,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): user_id: UserID, url_cache: Optional[str] = None, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "local_media_repository", { @@ -448,6 +465,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "media_length": media_length, "user_id": user_id.to_string(), "url_cache": url_cache, + "authenticated": authenticated, }, desc="store_local_media", ) @@ -638,6 +656,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "filesystem_id", "last_access_ts", "quarantined_by", + "authenticated", ), allow_none=True, desc="get_cached_remote_media", @@ -654,6 +673,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): filesystem_id=row[4], last_access_ts=row[5], quarantined_by=row[6], + authenticated=row[7], ) async def store_cached_remote_media( @@ -666,6 +686,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): upload_name: Optional[str], filesystem_id: str, ) -> None: + if self.hs.config.media.enable_authenticated_media: + authenticated = True + else: + authenticated = False + await self.db_pool.simple_insert( "remote_media_cache", { @@ -677,6 +702,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "upload_name": upload_name, "filesystem_id": filesystem_id, "last_access_ts": time_now_ms, + "authenticated": authenticated, }, desc="store_cached_remote_media", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 0dc5d24249..581d00346b 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 85 # remember to update the list below when updating +SCHEMA_VERSION = 86 # remember to update the list below when updating """Represents the 
expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -139,6 +139,9 @@ Changes in SCHEMA_VERSION = 84 Changes in SCHEMA_VERSION = 85 - Add a column `suspended` to the `users` table + +Changes in SCHEMA_VERSION = 86 + - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache` """ diff --git a/synapse/storage/schema/main/delta/86/01_authenticate_media.sql b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql new file mode 100644 index 0000000000..c1ac01ae95 --- /dev/null +++ b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql @@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +ALTER TABLE remote_media_cache ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL; +ALTER TABLE local_media_repository ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL; diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 466c5a0b70..30b6d31d0a 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -43,6 +43,7 @@ from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from twisted.web.http_headers import Headers from twisted.web.iweb import UNKNOWN_LENGTH, IResponse +from twisted.web.resource import Resource from synapse.api.errors import HttpResponseException from synapse.api.ratelimiting import Ratelimiter @@ -2466,3 +2467,211 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): server_name=None, ) ) + + +configs = [ + {"extra_config": {"dynamic_thumbnails": True}}, + {"extra_config": {"dynamic_thumbnails": False}}, +] + + +@parameterized_class(configs) +class AuthenticatedMediaTestCase(unittest.HomeserverTestCase): + extra_config: Dict[str, Any] + servlets = [ + media.register_servlets, + login.register_servlets, + admin.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + config = self.default_config() + + self.clock = clock + self.storage_path = self.mktemp() + self.media_store_path = self.mktemp() + os.mkdir(self.storage_path) + os.mkdir(self.media_store_path) + config["media_store_path"] = self.media_store_path + config["enable_authenticated_media"] = True + + provider_config = { + "module": "synapse.media.storage_provider.FileStorageProviderBackend", + "store_local": True, + "store_synchronous": False, + "store_remote": True, + "config": {"directory": self.storage_path}, + } + + config["media_storage_providers"] = [provider_config] + config.update(self.extra_config) + + return self.setup_test_homeserver(config=config) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.repo = hs.get_media_repository() + self.client = hs.get_federation_http_client() + self.store = hs.get_datastores().main + self.user = self.register_user("user", "pass") + self.tok = self.login("user", "pass") + + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + 
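+        # Mount the legacy `/_matrix/media` tree as well, so the tests below can
+        # exercise both the authenticated and the unauthenticated endpoints.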
resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + def test_authenticated_media(self) -> None: + # upload some local media with authentication on + channel = self.make_request( + "POST", + "_matrix/media/v3/upload?filename=test_png_upload", + SMALL_PNG, + self.tok, + shorthand=False, + content_type=b"image/png", + custom_headers=[("Content-Length", str(67))], + ) + self.assertEqual(channel.code, 200) + res = channel.json_body.get("content_uri") + assert res is not None + uri = res.split("mxc://")[1] + + # request media over authenticated endpoint, should be found + channel2 = self.make_request( + "GET", + f"_matrix/client/v1/media/download/{uri}", + access_token=self.tok, + shorthand=False, + ) + self.assertEqual(channel2.code, 200) + + # request same media over unauthenticated media, should raise 404 not found + channel3 = self.make_request( + "GET", f"_matrix/media/v3/download/{uri}", shorthand=False + ) + self.assertEqual(channel3.code, 404) + + # check thumbnails as well + params = "?width=32&height=32&method=crop" + channel4 = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/{uri}{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel4.code, 200) + + params = "?width=32&height=32&method=crop" + channel5 = self.make_request( + "GET", + f"/_matrix/media/r0/thumbnail/{uri}{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel5.code, 404) + + # Inject a piece of remote media. + file_id = "abcdefg12345" + file_info = FileInfo(server_name="lonelyIsland", file_id=file_id) + + media_storage = self.hs.get_media_repository().media_storage + + ctx = media_storage.store_into_file(file_info) + (f, fname) = self.get_success(ctx.__aenter__()) + f.write(SMALL_PNG) + self.get_success(ctx.__aexit__(None, None, None)) + + # we write the authenticated status when storing media, so this should pick up + # config and authenticate the media + self.get_success( + self.store.store_cached_remote_media( + origin="lonelyIsland", + media_id="52", + media_type="image/png", + media_length=1, + time_now_ms=self.clock.time_msec(), + upload_name="remote_test.png", + filesystem_id=file_id, + ) + ) + + # ensure we have thumbnails for the non-dynamic code path + if self.extra_config == {"dynamic_thumbnails": False}: + self.get_success( + self.repo._generate_thumbnails( + "lonelyIsland", "52", file_id, "image/png" + ) + ) + + channel6 = self.make_request( + "GET", + "_matrix/client/v1/media/download/lonelyIsland/52", + access_token=self.tok, + shorthand=False, + ) + self.assertEqual(channel6.code, 200) + + channel7 = self.make_request( + "GET", f"_matrix/media/v3/download/{uri}", shorthand=False + ) + self.assertEqual(channel7.code, 404) + + params = "?width=32&height=32&method=crop" + channel8 = self.make_request( + "GET", + f"/_matrix/client/v1/media/thumbnail/lonelyIsland/52{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel8.code, 200) + + channel9 = self.make_request( + "GET", + f"/_matrix/media/r0/thumbnail/lonelyIsland/52{params}", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel9.code, 404) + + # Inject a piece of local media that isn't authenticated + file_id = "abcdefg123456" + file_info = FileInfo(None, file_id=file_id) + + ctx = media_storage.store_into_file(file_info) + (f, fname) = self.get_success(ctx.__aenter__()) + f.write(SMALL_PNG) + self.get_success(ctx.__aexit__(None, None, None)) + + self.get_success( + 
self.store.db_pool.simple_insert( + "local_media_repository", + { + "media_id": "abcdefg123456", + "media_type": "image/png", + "created_ts": self.clock.time_msec(), + "upload_name": "test_local", + "media_length": 1, + "user_id": "someone", + "url_cache": None, + "authenticated": False, + }, + desc="store_local_media", + ) + ) + + # check that unauthenticated media is still available over both endpoints + channel9 = self.make_request( + "GET", + "/_matrix/client/v1/media/download/test/abcdefg123456", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel9.code, 200) + + channel10 = self.make_request( + "GET", + "/_matrix/media/r0/download/test/abcdefg123456", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel10.code, 200) From 1648337775e8004749a29cdb4d163cafc3edd076 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:28:54 +0100 Subject: [PATCH 054/332] Bump sentry-sdk from 2.8.0 to 2.10.0 (#17467) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2bfcb59cf2..68d10e7c0d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2430,13 +2430,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.8.0" +version = "2.10.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"}, - {file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"}, + {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"}, + {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"}, ] [package.dependencies] From 73529d373246b09fd3d83b494500dc944f208867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:29:06 +0100 Subject: [PATCH 055/332] Bump ruff from 0.5.0 to 0.5.4 (#17466) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 68d10e7c0d..282219e194 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2373,29 +2373,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.0" +version = "0.5.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"}, - {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"}, - {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"}, - {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"}, - {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"}, - {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"}, - {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"}, + {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"}, + {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"}, + {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"}, + {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"}, + {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"}, + {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"}, + {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"}, ] [[package]] @@ -3230,4 +3230,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d" +content-hash = "e65fbd044230964cae8810c84289bcf0bc43b27532ea5a5ef8843eab4f6514af" diff --git a/pyproject.toml b/pyproject.toml index 0f040fc612..50dc0c3c63 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -322,7 +322,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.5.0" +ruff = "0.5.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From ed0face8ada72731eba70f9cc56a9b482b7bef1e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2024 14:51:17 +0100 Subject: [PATCH 056/332] Speed up room keys query by using read/write lock (#17461) Linaerizing all access slows things down when devices try and fetch lots of keys on login --- changelog.d/17461.misc | 1 + synapse/handlers/e2e_room_keys.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17461.misc diff --git a/changelog.d/17461.misc b/changelog.d/17461.misc new file mode 100644 index 0000000000..80f7144baa --- /dev/null +++ b/changelog.d/17461.misc @@ -0,0 +1 @@ +Speed up fetching room keys from backup. 
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 99f9f6e64a..f397911f28 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -34,7 +34,7 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, trace from synapse.storage.databases.main.e2e_room_keys import RoomKey from synapse.types import JsonDict -from synapse.util.async_helpers import Linearizer +from synapse.util.async_helpers import ReadWriteLock if TYPE_CHECKING: from synapse.server import HomeServer @@ -58,7 +58,7 @@ class E2eRoomKeysHandler: # clients belonging to a user will receive and try to upload a new session at # roughly the same time. Also used to lock out uploads when the key is being # changed. - self._upload_linearizer = Linearizer("upload_room_keys_lock") + self._upload_lock = ReadWriteLock() @trace async def get_room_keys( @@ -89,7 +89,7 @@ class E2eRoomKeysHandler: # we deliberately take the lock to get keys so that changing the version # works atomically - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.read(user_id): # make sure the backup version exists try: await self.store.get_e2e_room_keys_version_info(user_id, version) @@ -132,7 +132,7 @@ class E2eRoomKeysHandler: """ # lock for consistency with uploading - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): # make sure the backup version exists try: version_info = await self.store.get_e2e_room_keys_version_info( @@ -193,7 +193,7 @@ class E2eRoomKeysHandler: # TODO: Validate the JSON to make sure it has the right keys. # XXX: perhaps we should use a finer grained lock here? - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): # Check that the version we're trying to upload is the current version try: version_info = await self.store.get_e2e_room_keys_version_info(user_id) @@ -355,7 +355,7 @@ class E2eRoomKeysHandler: # TODO: Validate the JSON to make sure it has the right keys. # lock everyone out until we've switched version - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): new_version = await self.store.create_e2e_room_keys_version( user_id, version_info ) @@ -382,7 +382,7 @@ class E2eRoomKeysHandler: } """ - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.read(user_id): try: res = await self.store.get_e2e_room_keys_version_info(user_id, version) except StoreError as e: @@ -407,7 +407,7 @@ class E2eRoomKeysHandler: NotFoundError: if this backup version doesn't exist """ - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): try: await self.store.delete_e2e_room_keys_version(user_id, version) except StoreError as e: @@ -437,7 +437,7 @@ class E2eRoomKeysHandler: raise SynapseError( 400, "Version in body does not match", Codes.INVALID_PARAM ) - async with self._upload_linearizer.queue(user_id): + async with self._upload_lock.write(user_id): try: old_info = await self.store.get_e2e_room_keys_version_info( user_id, version From d221512498f7e3267916a289dd2ef4f3e00728e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2024 17:48:09 +0100 Subject: [PATCH 057/332] SS: Implement `$ME` support (#17469) `$ME` can be used as a substitute for the requester's user ID. 
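As a concrete illustration (a hypothetical request body mirroring the test added below, not something defined verbatim by this patch): each `required_state` entry is an `[event_type, state_key]` pair, and a `state_key` of `"$ME"` is substituted server-side with the requesting user's ID before the state is resolved.

```python
# Hypothetical Sliding Sync request body using $ME as a state key.
sync_body = {
    "lists": {
        "foo-list": {
            "ranges": [[0, 1]],
            "required_state": [
                ["m.room.create", ""],
                ["m.room.member", "$ME"],   # the requester's own membership
                ["org.matrix.foo", "$ME"],  # any state keyed on the user's ID
            ],
            "timeline_limit": 3,
        }
    }
}
```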
---
 changelog.d/17469.misc           |  1 +
 synapse/handlers/sliding_sync.py |  6 ++-
 tests/rest/client/test_sync.py   | 74 ++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/17469.misc

diff --git a/changelog.d/17469.misc b/changelog.d/17469.misc
new file mode 100644
index 0000000000..ba0419355b
--- /dev/null
+++ b/changelog.d/17469.misc
@@ -0,0 +1 @@
+Implement handling of `$ME` as a state key in sliding sync.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 423f0329d6..c362afa6e2 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -329,6 +329,9 @@ class StateValues:
     # `sender` in the timeline). We only give special meaning to this value when it's a
     # `state_key`.
     LAZY: Final = "$LAZY"
+    # Substitute with the requester's user ID. Typically used by clients to get
+    # the user's membership.
+    ME: Final = "$ME"


 class SlidingSyncHandler:
@@ -504,7 +507,6 @@ class SlidingSyncHandler:
         # Also see `StateFilter.must_await_full_state(...)` for comparison
         lazy_loading = (
             membership_state_keys is not None
-            and len(membership_state_keys) == 1
             and StateValues.LAZY in membership_state_keys
         )

@@ -1662,6 +1664,8 @@ class SlidingSyncHandler:
                     # FIXME: We probably also care about invite, ban, kick, targets, etc
                     # but the spec only mentions "senders".
+                elif state_key == StateValues.ME:
+                    required_state_types.append((state_type, user.to_string()))
                 else:
                     required_state_types.append((state_type, state_key))

diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index a008ee465b..a88bdb5c14 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -3714,6 +3714,80 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
         )
         self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))

+    def test_rooms_required_state_me(self) -> None:
+        """
+        Test `rooms.required_state` correctly handles $ME.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "1", tok=user2_tok)
+
+        # Also send normal state events with state keys of the users, first
+        # change the power levels to allow this.
+        self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.PowerLevels,
+            body={"users": {user1_id: 50, user2_id: 100}},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user1_id,
+            body={},
+            tok=user1_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user2_id,
+            body={},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with a request for '$ME'.
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {
+                    "foo-list": {
+                        "ranges": [[0, 1]],
+                        "required_state": [
+                            [EventTypes.Create, ""],
+                            [EventTypes.Member, StateValues.ME],
+                            ["org.matrix.foo", StateValues.ME],
+                        ],
+                        "timeline_limit": 3,
+                    }
+                }
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # We should only see state whose state_key is user1's own user ID, i.e.
+        # what `$ME` was substituted with (plus the create event).
+        self._assertRequiredStateIncludes(
+            channel.json_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[("org.matrix.foo", user1_id)],
+            },
+            exact=True,
+        )
+        self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state"))
+
     @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
     def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
         """

From de05a642460fa04f6e279fa166f032e9ff94b4b0 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 22 Jul 2024 15:40:06 -0500
Subject: [PATCH 058/332] Sliding Sync: Add E2EE extension (MSC3884) (#17454)

Spec: [MSC3884](https://github.com/matrix-org/matrix-spec-proposals/pull/3884)

Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
---
 changelog.d/17454.feature             |   1 +
 synapse/handlers/device.py            |  17 +-
 synapse/handlers/sliding_sync.py      | 107 +++-
 synapse/rest/client/keys.py           |  10 +-
 synapse/rest/client/sync.py           |  32 +-
 synapse/types/__init__.py             |   7 +-
 synapse/types/handlers/__init__.py    |  48 +-
 synapse/types/rest/client/__init__.py |  10 +
 tests/rest/client/test_sync.py        | 825 +++++++++++++++++++++++++-
 9 files changed, 1023 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/17454.feature

diff --git a/changelog.d/17454.feature b/changelog.d/17454.feature
new file mode 100644
index 0000000000..bb088371bf
--- /dev/null
+++ b/changelog.d/17454.feature
@@ -0,0 +1 @@
+Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 0432d97109..4fc6fcd7ae 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -39,6 +39,7 @@ from synapse.metrics.background_process_metrics import (
 )
 from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
 from synapse.types import (
+    DeviceListUpdates,
     JsonDict,
     JsonMapping,
     ScheduledTask,
@@ -214,7 +215,7 @@ class DeviceWorkerHandler:
     @cancellable
     async def get_user_ids_changed(
         self, user_id: str, from_token: StreamToken
-    ) -> JsonDict:
+    ) -> DeviceListUpdates:
        """Get list of users that have had the devices updated, or have newly
        joined a room, that `user_id` may be interested in.
""" @@ -341,11 +342,19 @@ class DeviceWorkerHandler: possibly_joined = set() possibly_left = set() - result = {"changed": list(possibly_joined), "left": list(possibly_left)} + device_list_updates = DeviceListUpdates( + changed=possibly_joined, + left=possibly_left, + ) - log_kv(result) + log_kv( + { + "changed": device_list_updates.changed, + "left": device_list_updates.left, + } + ) - return result + return device_list_updates async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: if not self.hs.is_mine(UserID.from_string(user_id)): diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index c362afa6e2..886d7c7159 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -19,7 +19,18 @@ # import logging from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, Final, List, Mapping, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Final, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr from immutabledict import immutabledict @@ -33,6 +44,7 @@ from synapse.storage.databases.main.roommember import extract_heroes_from_room_s from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary from synapse.types import ( + DeviceListUpdates, JsonDict, PersistedEventPosition, Requester, @@ -343,6 +355,7 @@ class SlidingSyncHandler: self.notifier = hs.get_notifier() self.event_sources = hs.get_event_sources() self.relations_handler = hs.get_relations_handler() + self.device_handler = hs.get_device_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync async def wait_for_sync_for_user( @@ -371,10 +384,6 @@ class SlidingSyncHandler: # auth_blocking will occur) await self.auth_blocking.check_auth_blocking(requester=requester) - # TODO: If the To-Device extension is enabled and we have a `from_token`, delete - # any to-device messages before that token (since we now know that the device - # has received them). (see sync v2 for how to do this) - # If we're working with a user-provided token, we need to make sure to wait for # this worker to catch up with the token so we don't skip past any incoming # events or future events if the user is nefariously, manually modifying the @@ -617,7 +626,9 @@ class SlidingSyncHandler: await concurrently_execute(handle_room, relevant_room_map, 10) extensions = await self.get_extensions_response( - sync_config=sync_config, to_token=to_token + sync_config=sync_config, + from_token=from_token, + to_token=to_token, ) return SlidingSyncResult( @@ -1776,33 +1787,47 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, + from_token: Optional[StreamToken], ) -> SlidingSyncResult.Extensions: """Handle extension requests. Args: sync_config: Sync configuration to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
""" if sync_config.extensions is None: return SlidingSyncResult.Extensions() to_device_response = None - if sync_config.extensions.to_device: - to_device_response = await self.get_to_device_extensions_response( + if sync_config.extensions.to_device is not None: + to_device_response = await self.get_to_device_extension_response( sync_config=sync_config, to_device_request=sync_config.extensions.to_device, to_token=to_token, ) - return SlidingSyncResult.Extensions(to_device=to_device_response) + e2ee_response = None + if sync_config.extensions.e2ee is not None: + e2ee_response = await self.get_e2ee_extension_response( + sync_config=sync_config, + e2ee_request=sync_config.extensions.e2ee, + to_token=to_token, + from_token=from_token, + ) - async def get_to_device_extensions_response( + return SlidingSyncResult.Extensions( + to_device=to_device_response, + e2ee=e2ee_response, + ) + + async def get_to_device_extension_response( self, sync_config: SlidingSyncConfig, to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, to_token: StreamToken, - ) -> SlidingSyncResult.Extensions.ToDeviceExtension: + ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: """Handle to-device extension (MSC3885) Args: @@ -1810,14 +1835,16 @@ class SlidingSyncHandler: to_device_request: The to-device extension from the request to_token: The point in the stream to sync up to. """ - user_id = sync_config.user.to_string() device_id = sync_config.device_id + # Skip if the extension is not enabled + if not to_device_request.enabled: + return None + # Check that this request has a valid device ID (not all requests have - # to belong to a device, and so device_id is None), and that the - # extension is enabled. - if device_id is None or not to_device_request.enabled: + # to belong to a device, and so device_id is None) + if device_id is None: return SlidingSyncResult.Extensions.ToDeviceExtension( next_batch=f"{to_token.to_device_key}", events=[], @@ -1868,3 +1895,53 @@ class SlidingSyncHandler: next_batch=f"{stream_id}", events=messages, ) + + async def get_e2ee_extension_response( + self, + sync_config: SlidingSyncConfig, + e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: + """Handle E2EE device extension (MSC3884) + + Args: + sync_config: Sync configuration + e2ee_request: The e2ee extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
+ """ + user_id = sync_config.user.to_string() + device_id = sync_config.device_id + + # Skip if the extension is not enabled + if not e2ee_request.enabled: + return None + + device_list_updates: Optional[DeviceListUpdates] = None + if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id=user_id, + from_token=from_token, + ) + + device_one_time_keys_count: Mapping[str, int] = {} + device_unused_fallback_key_types: Sequence[str] = [] + if device_id: + # TODO: We should have a way to let clients differentiate between the states of: + # * no change in OTK count since the provided since token + # * the server has zero OTKs left for this device + # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 + device_one_time_keys_count = await self.store.count_e2e_one_time_keys( + user_id, device_id + ) + device_unused_fallback_key_types = ( + await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + + return SlidingSyncResult.Extensions.E2eeExtension( + device_list_updates=device_list_updates, + device_one_time_keys_count=device_one_time_keys_count, + device_unused_fallback_key_types=device_unused_fallback_key_types, + ) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 67de634eab..eddad7d5b8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -256,9 +256,15 @@ class KeyChangesServlet(RestServlet): user_id = requester.user.to_string() - results = await self.device_handler.get_user_ids_changed(user_id, from_token) + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id, from_token + ) - return 200, results + response: JsonDict = {} + response["changed"] = list(device_list_updates.changed) + response["left"] = list(device_list_updates.left) + + return 200, response class OneTimeKeyServlet(RestServlet): diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 1d8cbfdf00..93fe1d439e 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1081,15 +1081,41 @@ class SlidingSyncRestServlet(RestServlet): async def encode_extensions( self, requester: Requester, extensions: SlidingSyncResult.Extensions ) -> JsonDict: - result = {} + serialized_extensions: JsonDict = {} if extensions.to_device is not None: - result["to_device"] = { + serialized_extensions["to_device"] = { "next_batch": extensions.to_device.next_batch, "events": extensions.to_device.events, } - return result + if extensions.e2ee is not None: + serialized_extensions["e2ee"] = { + # We always include this because + # https://github.com/vector-im/element-android/issues/3725. The spec + # isn't terribly clear on when this can be omitted and how a client + # would tell the difference between "no keys present" and "nothing + # changed" in terms of whole field absent / individual key type entry + # absent Corresponding synapse issue: + # https://github.com/matrix-org/synapse/issues/10456 + "device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count, + # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md + # states that this field should always be included, as long as the + # server supports the feature. 
+ "device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types, + } + + if extensions.e2ee.device_list_updates is not None: + serialized_extensions["e2ee"]["device_lists"] = {} + + serialized_extensions["e2ee"]["device_lists"]["changed"] = list( + extensions.e2ee.device_list_updates.changed + ) + serialized_extensions["e2ee"]["device_lists"]["left"] = list( + extensions.e2ee.device_list_updates.left + ) + + return serialized_extensions def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 3962ecc996..046cdc29cd 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -1219,11 +1219,12 @@ class ReadReceipt: @attr.s(slots=True, frozen=True, auto_attribs=True) class DeviceListUpdates: """ - An object containing a diff of information regarding other users' device lists, intended for - a recipient to carry out device list tracking. + An object containing a diff of information regarding other users' device lists, + intended for a recipient to carry out device list tracking. Attributes: - changed: A set of users whose device lists have changed recently. + changed: A set of users who have updated their device identity or + cross-signing keys, or who now share an encrypted room with. left: A set of users who the recipient no longer needs to track the device lists of. Typically when those users no longer share any end-to-end encryption enabled rooms. """ diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 409120470a..4c6c42db04 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -18,7 +18,7 @@ # # from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple import attr from typing_extensions import TypedDict @@ -31,7 +31,7 @@ else: from pydantic import Extra from synapse.events import EventBase -from synapse.types import JsonDict, JsonMapping, StreamToken, UserID +from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, StreamToken, UserID from synapse.types.rest.client import SlidingSyncBody if TYPE_CHECKING: @@ -264,6 +264,7 @@ class SlidingSyncResult: Attributes: to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) """ @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -282,10 +283,51 @@ class SlidingSyncResult: def __bool__(self) -> bool: return bool(self.events) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. + device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. 
+ """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None def __bool__(self) -> bool: - return bool(self.to_device) + return bool(self.to_device or self.e2ee) next_pos: StreamToken lists: Dict[str, SlidingWindowList] diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index dbe37bc712..f3c45a0d6a 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -313,7 +313,17 @@ class SlidingSyncBody(RequestBodyModel): return value + class E2eeExtension(RequestBodyModel): + """The E2EE device extension (MSC3884) + + Attributes: + enabled + """ + + enabled: Optional[StrictBool] = False + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index a88bdb5c14..2628869de6 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -59,6 +59,7 @@ from tests.federation.transport.test_knocking import ( ) from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import mark_event_as_partial_state +from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -1113,12 +1114,11 @@ class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase): self.assertEqual(res, []) # Upload a fallback key for the user/device - fallback_key = {"alg1:k1": "fallback_key1"} self.get_success( self.e2e_keys_handler.upload_keys_for_user( alice_user_id, test_device_id, - {"fallback_keys": fallback_key}, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, ) ) # We should now have an unused alg1 key @@ -1252,6 +1252,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() def _assertRequiredStateIncludes( self, @@ -1377,6 +1379,52 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): return room_id + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. 
+ """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def test_sync_list(self) -> None: """ Test that room IDs show up in the Sliding Sync `lists` @@ -1482,6 +1530,124 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # with because we weren't able to find anything new yet. self.assertEqual(channel.json_body["pos"], future_position_token_serialized) + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the room with new events to trigger new results + event_response1 = self.helper.send( + room_id, "new activity in room", tok=user1_tok + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check to make sure the new event is returned + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id]["timeline"] + ], + [ + event_response1["event_id"], + ], + channel.json_body["rooms"][room_id]["timeline"], + ) + + # TODO: Once we remove `ops`, we should be able to add a `RoomResult.__bool__` to + # check if there are any updates since the `from_token`. 
+ @skip_unless( + False, + "Once we remove ops from the Sliding Sync response, this test should pass", + ) + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + # We still see rooms because that's how Sliding Sync lists work but we reached + # the timeout before seeing them + self.assertEqual( + [event["event_id"] for event in channel.json_body["rooms"].keys()], + [room_id], + ) + def test_filter_list(self) -> None: """ Test that filters apply to `lists` @@ -1508,11 +1674,11 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id, user1_id, tok=user1_tok) # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) # Make the Sliding Sync request @@ -4320,10 +4486,59 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() self.sync_endpoint = ( "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" ) + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. 
+ """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def _assert_to_device_response( self, channel: FakeChannel, expected_messages: List[JsonDict] ) -> str: @@ -4487,3 +4702,605 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): access_token=user1_tok, ) self._assert_to_device_response(channel, []) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the to-device messages to trigger new results + test_msg = {"foo": "bar"} + send_to_device_channel = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual( + send_to_device_channel.code, 200, send_to_device_channel.result + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the To-Device extension doesn't trigger a false-positive for new data. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response(channel, []) + + +class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): + """Tests for the e2ee sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + devices.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() + self.sync_endpoint = ( + "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + ) + + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. + """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. 
+        self.get_success(
+            self.account_data_handler.add_account_data_for_user(
+                user_id,
+                "org.matrix.foobarbaz",
+                {"foo": "bar"},
+            )
+        )
+
+        # Wait for our notifier result
+        self.get_success(result_awaitable)
+
+        if not triggered_notifier_wait_for_events:
+            raise AssertionError(
+                "Expected `notifier.wait_for_events(...)` to be triggered"
+            )
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an initial sync, even if
+        there is no data
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the e2ee extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint,
+            {
+                "lists": {},
+                "extensions": {
+                    "e2ee": {
+                        "enabled": True,
+                    }
+                },
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Device list updates are only present for incremental syncs
+        self.assertIsNone(channel.json_body["extensions"]["e2ee"].get("device_lists"))
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # This is always present because of
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an incremental sync, even
+        if there is no data
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        from_token = self.event_sources.get_current_token()
+
+        # Make an incremental Sliding Sync request with the e2ee extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint
+            + f"?pos={self.get_success(from_token.to_string(self.store))}",
+            {
+                "lists": {},
+                "extensions": {
+                    "e2ee": {
+                        "enabled": True,
+                    }
+                },
+            },
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Device list shows up for incremental syncs
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]
+            .get("device_lists", {})
+            .get("changed"),
+            [],
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+            [],
+        )
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # Note that "signed_curve25519" is always returned in key count responses
+                # regardless of whether we uploaded any keys for it. This is necessary until
+                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                #
+                # Also related:
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the device lists to trigger new results + # Have user3 update their device list + device_update_channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual( + device_update_channel.code, 200, device_update_channel.json_body + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the device list update + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [user3_id], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the E2EE extension doesn't trigger a false-positive for new data (see + `device_one_time_keys_count.signed_curve25519`). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. 
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device lists are present for incremental syncs but empty because no device changes + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + # Both of these should be present even when empty + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_device_lists(self) -> None: + """ + Test that device list updates are included in the response + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + self.helper.join(room_id, user4_id, tok=user4_tok) + + from_token = self.event_sources.get_current_token() + + # Have user3 update their device list + channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # User4 leaves the room + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # Make an incremental Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + + f"?pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device list updates show up + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [user3_id], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [user4_id], + ) + + def test_device_one_time_keys_count(self) -> None: + """ + Test that `device_one_time_keys_count` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # Upload one time keys for the user/device + keys: JsonDict = { + "alg1:k1": "key1", + "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, + "alg2:k3": {"key": "key3"}, + } + 
upload_keys_response = self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, test_device_id, {"one_time_keys": keys} + ) + ) + self.assertDictEqual( + upload_keys_response, + { + "one_time_key_counts": { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + } + }, + ) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for those one time key counts + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_one_time_keys_count"), + { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + }, + ) + + def test_device_unused_fallback_key_types(self) -> None: + """ + Test that `device_unused_fallback_key_types` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # We shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(res, []) + + # Upload a fallback key for the user/device + self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, + test_device_id, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, + ) + ) + # We should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(fallback_res, ["alg1"], fallback_res) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for the unused fallback key types + self.assertListEqual( + channel.json_body["extensions"]["e2ee"].get( + "device_unused_fallback_key_types" + ), + ["alg1"], + ) From e3a0681ecffcafa898f0797b9f23c0af9c923949 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 23:42:36 +0000 Subject: [PATCH 059/332] Bump pyopenssl from 24.1.0 to 24.2.1 (#17465) Bumps [pyopenssl](https://github.com/pyca/pyopenssl) from 24.1.0 to 24.2.1.
Changelog

Sourced from pyopenssl's changelog.

24.2.1 (2024-07-20)

Backward-incompatible changes:

Deprecations:

Changes:

  • Fixed changelog to remove sphinx specific restructured text strings.

24.2.0 (2024-07-20)

Backward-incompatible changes:

Deprecations:

  • Deprecated OpenSSL.crypto.X509Req, OpenSSL.crypto.load_certificate_request, OpenSSL.crypto.dump_certificate_request. Instead, cryptography.x509.CertificateSigningRequest, cryptography.x509.CertificateSigningRequestBuilder, cryptography.x509.load_der_x509_csr, or cryptography.x509.load_pem_x509_csr should be used.

Changes:

  • Added type hints for the SSL module ([#1308](https://github.com/pyca/pyopenssl/pull/1308)).
  • Changed OpenSSL.crypto.PKey.from_cryptography_key to accept public and private EC, ED25519, ED448 keys ([#1310](https://github.com/pyca/pyopenssl/pull/1310)).
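The `X509Req` deprecation above is the main action item for downstream code. As a rough migration sketch using only the `cryptography` APIs the changelog names (the key type, subject, and variable names here are illustrative, not prescribed by pyOpenSSL):

```python
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID

# Build a CSR via CertificateSigningRequestBuilder instead of
# OpenSSL.crypto.X509Req.
key = ec.generate_private_key(ec.SECP256R1())
csr = (
    x509.CertificateSigningRequestBuilder()
    .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example.com")]))
    .sign(key, hashes.SHA256())
)

# Round-trip through PEM with load_pem_x509_csr instead of
# OpenSSL.crypto.load_certificate_request / dump_certificate_request.
pem_csr = csr.public_bytes(serialization.Encoding.PEM)
parsed = x509.load_pem_x509_csr(pem_csr)
assert parsed.is_signature_valid
```

Only `poetry.lock` moves in this commit (no Synapse source changes accompany the bump), so the sketch is purely for readers who still call the deprecated helpers.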

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyopenssl&package-manager=pip&previous-version=24.1.0&new-version=24.2.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 282219e194..6069867e51 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2013,17 +2013,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "24.1.0" +version = "24.2.1" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" files = [ - {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"}, - {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"}, + {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"}, + {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"}, ] [package.dependencies] -cryptography = ">=41.0.5,<43" +cryptography = ">=41.0.5,<44" [package.extras] docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] From 13a99fba1b0120af34688e5717478404989b8a1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 23:58:19 +0000 Subject: [PATCH 060/332] Bump hiredis from 2.3.2 to 3.0.0 (#17464) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.3.2 to 3.0.0.
Release notes

Sourced from hiredis's releases.

3.0.0

Changes

Breaking Changes

  • Return Redis sets as Python lists (#189)

🐛 Bug Fixes

  • Return Redis sets as Python lists (#189)
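Of the changes above, the set-to-list return type is the one that can silently alter behaviour for callers doing equality checks or set algebra directly on parsed replies. A rough sketch of the defensive pattern, written against hiredis-py's low-level `Reader.feed`/`gets` API (the hand-built RESP3 payload, where `~2` introduces a two-element set, is purely illustrative):

```python
import hiredis

# Parse a RESP3 set reply: "~2" introduces a set of two bulk strings.
reader = hiredis.Reader()
reader.feed(b"~2\r\n$1\r\na\r\n$1\r\nb\r\n")
reply = reader.gets()

# hiredis < 3.0.0 decoded this as a Python set; 3.0.0 returns a list.
# Converting explicitly preserves set semantics under either version.
members = set(reply)
assert b"a" in members
```

Code that merely iterates over replies is unaffected; comparisons against `set` literals and repeated membership tests are the cases worth auditing after this bump.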

Contributors

We'd like to thank all the contributors who worked on this release!

@gerzse

2.4.0

Changes

🧰 Maintenance

  • Fix small typo (#192)
  • Quote version for Python setup action in CI (#191)
  • Fix building the wheel for windows (#190)
  • pack: Replace sdsalloc.h with alloc.h (#159)
  • Bump black from 22.3.0 to 24.3.0 (#185)
  • Removing python 3.7 trove (#181)
  • Badge for latest released on Pypi (#182)
  • Sync license in metadata with LICENSE file (#183)

Contributors

We'd like to thank all the contributors who worked on this release!

@Apteryks, @ArtemIsmagilov, @chayim, @dependabot, @dependabot[bot], @gerzse and @shadchin


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=hiredis&package-manager=pip&previous-version=2.3.2&new-version=3.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 207 ++++++++++++++++++++++++---------------------------- 1 file changed, 96 insertions(+), 111 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6069867e51..945b91e022 100644 --- a/poetry.lock +++ b/poetry.lock @@ -542,120 +542,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "hiredis" -version = "2.3.2" +version = "3.0.0" description = "Python wrapper for hiredis" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"}, - {file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"}, - {file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"}, - {file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"}, - {file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"}, - {file = 
"hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"}, - {file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"}, - {file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"}, - {file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"}, - {file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"}, - {file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"}, - {file = 
"hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"}, - {file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"}, - {file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"}, - {file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"}, - {file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash 
= "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"}, - {file = "hiredis-2.3.2.tar.gz", hash = 
"sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"}, + {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"}, + {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"}, + {file = 
"hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"}, + {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"}, + {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"}, + {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"}, + {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"}, + {file = 
"hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"}, + {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"}, + {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"}, + {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"}, + {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"}, + {file = 
"hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"}, + {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"}, ] [[package]] From a9ee832e4899fefcfd27fba475091e1ffaa069b8 Mon Sep 17 00:00:00 2001 From: Michael Hollister Date: Tue, 23 Jul 2024 04:59:24 -0500 Subject: [PATCH 061/332] Fixed presence results not returning offline users on initial sync (#17231) This is to address an issue in which `m.presence` results on initial sync are not returning entries of users who are currently offline. The original behaviour was from https://github.com/element-hq/synapse/issues/1535 This change is useful for applications that use the presence system for tracking user profile information/updates (e.g. https://github.com/element-hq/synapse/pull/16992 or for profile status messages). This is gated behind a new configuration option to avoid performance impact for applications that don't need this, as a pragmatic solution for now. --- changelog.d/17231.bugfix | 1 + docs/usage/configuration/config_documentation.md | 5 +++++ synapse/config/server.py | 5 +++++ synapse/handlers/sync.py | 6 +++++- 4 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17231.bugfix diff --git a/changelog.d/17231.bugfix b/changelog.d/17231.bugfix new file mode 100644 index 0000000000..d09b455654 --- /dev/null +++ b/changelog.d/17231.bugfix @@ -0,0 +1 @@ +Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index e8bc2df798..649f4f71c7 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -246,6 +246,7 @@ Example configuration: ```yaml presence: enabled: false + include_offline_users_on_sync: false ``` `enabled` can also be set to a special value of "untracked" which ignores updates @@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the *The "untracked" option was added in Synapse 1.96.0.* +When clients perform an initial or `full_state` sync, presence results for offline users are +not included by default. Setting `include_offline_users_on_sync` to `true` will always include +offline users in the results. Defaults to false. 
+ --- ### `require_auth_for_profile_requests` diff --git a/synapse/config/server.py b/synapse/config/server.py index 8bb97df175..fd52c0475c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -384,6 +384,11 @@ class ServerConfig(Config): # Whether to internally track presence, requires that presence is enabled, self.track_presence = self.presence_enabled and presence_enabled != "untracked" + # Determines if presence results for offline users are included on initial/full sync + self.presence_include_offline_users_on_sync = presence_config.get( + "include_offline_users_on_sync", False + ) + # Custom presence router module # This is the legacy way of configuring it (the config should now be put in the modules section) self.presence_router_module_class = None diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index de227faec3..ede014180c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2270,7 +2270,11 @@ class SyncHandler: user=user, from_key=presence_key, is_guest=sync_config.is_guest, - include_offline=include_offline, + include_offline=( + True + if self.hs_config.server.presence_include_offline_users_on_sync + else include_offline + ), ) assert presence_key sync_result_builder.now_token = now_token.copy_and_replace( From 1daae43f3aded13ccab1fbacbe8fa299cc2e359e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 23 Jul 2024 11:51:34 +0100 Subject: [PATCH 062/332] Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. (#17428) Introduced in: #17215 This caused us a minor bit of grief as the volume of logs produced was much higher than normal --------- Signed-off-by: Olivier 'reivilibre --- changelog.d/17428.bugfix | 1 + synapse/notifier.py | 13 ++++++++----- synapse/types/__init__.py | 23 +++++++++++++++++++++++ 3 files changed, 32 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17428.bugfix diff --git a/changelog.d/17428.bugfix b/changelog.d/17428.bugfix new file mode 100644 index 0000000000..cbfd7b0c39 --- /dev/null +++ b/changelog.d/17428.bugfix @@ -0,0 +1 @@ +Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. 
\ No newline at end of file diff --git a/synapse/notifier.py b/synapse/notifier.py index c3ecf86ec4..7a2b54036c 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -773,6 +773,7 @@ class Notifier: stream_token = await self.event_sources.bound_future_token(stream_token) start = self.clock.time_msec() + logged = False while True: current_token = self.event_sources.get_current_token() if stream_token.is_before_or_eq(current_token): @@ -783,11 +784,13 @@ class Notifier: if now - start > 10_000: return False - logger.info( - "Waiting for current token to reach %s; currently at %s", - stream_token, - current_token, - ) + if not logged: + logger.info( + "Waiting for current token to reach %s; currently at %s", + stream_token, + current_token, + ) + logged = True # TODO: be better await self.clock.sleep(0.5) diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 046cdc29cd..c0d30ac2a3 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -777,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return super().bound_stream_token(max_stream) + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, " + f"instances: {{{instances}}})" + ) + @attr.s(frozen=True, slots=True, order=False) class MultiWriterStreamToken(AbstractMultiWriterStreamToken): @@ -873,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): return True + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"MultiWriterStreamToken(stream: {self.stream}, " + f"instances: {{{instances}}})" + ) + class StreamKeyType(Enum): """Known stream types. @@ -1131,6 +1145,15 @@ class StreamToken: return True + def __str__(self) -> str: + return ( + f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, " + f"typing: {self.typing_key}, receipt: {self.receipt_key}, " + f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, " + f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, " + f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})" + ) + StreamToken.START = StreamToken( RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0 From d225b6b3ebea419bdf0e6c0f1476544053f2dcbf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2024 14:03:14 +0100 Subject: [PATCH 063/332] Speed up SS room sorting (#17468) We do this by bulk fetching the latest stream ordering. --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17468.misc | 1 + synapse/handlers/sliding_sync.py | 43 +++--- .../databases/main/event_federation.py | 5 + synapse/storage/databases/main/stream.py | 123 +++++++++++++++++- synapse/util/caches/stream_change_cache.py | 12 +- tests/util/test_stream_change_cache.py | 4 +- 6 files changed, 159 insertions(+), 29 deletions(-) create mode 100644 changelog.d/17468.misc diff --git a/changelog.d/17468.misc b/changelog.d/17468.misc new file mode 100644 index 0000000000..d908776204 --- /dev/null +++ b/changelog.d/17468.misc @@ -0,0 +1 @@ +Speed up sorting of the room list in sliding sync. 
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 886d7c7159..554ab59bf3 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -1230,34 +1230,33 @@ class SlidingSyncHandler: # Assemble a map of room ID to the `stream_ordering` of the last activity that the # user should see in the room (<= `to_token`) last_activity_in_room_map: Dict[str, int] = {} + for room_id, room_for_user in sync_room_map.items(): - # If they are fully-joined to the room, let's find the latest activity - # at/before the `to_token`. - if room_for_user.membership == Membership.JOIN: - last_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, to_token.room_key - ) - ) - - # If the room has no events at/before the `to_token`, this is probably a - # mistake in the code that generates the `sync_room_map` since that should - # only give us rooms that the user had membership in during the token range. - assert last_event_result is not None - - _, event_pos = last_event_result - - last_activity_in_room_map[room_id] = event_pos.stream - else: - # Otherwise, if the user has left/been invited/knocked/been banned from - # a room, they shouldn't see anything past that point. + if room_for_user.membership != Membership.JOIN: + # If the user has left/been invited/knocked/been banned from a + # room, they shouldn't see anything past that point. # - # FIXME: It's possible that people should see beyond this point in - # invited/knocked cases if for example the room has + # FIXME: It's possible that people should see beyond this point + # in invited/knocked cases if for example the room has # `invite`/`world_readable` history visibility, see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + # For fully-joined rooms, we find the latest activity at/before the + # `to_token`. + joined_room_positions = ( + await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( + [ + room_id + for room_id, room_for_user in sync_room_map.items() + if room_for_user.membership == Membership.JOIN + ], + to_token.room_key, + ) + ) + + last_activity_in_room_map.update(joined_room_positions) + return sorted( sync_room_map.values(), # Sort by the last activity (stream_ordering) in the room diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 24abab4a23..715846865b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # We want to make the cache more effective, so we clamp to the last # change before the given ordering. last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined] + if last_change is None: + # If the room isn't in the cache we know that the last change was + # somewhere before the earliest known position of the cache, so we + # can clamp to that. + last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined] # We don't always have a full stream_to_exterm_id table, e.g. 
after # the upgrade that introduced it, so we make sure we never ask for a diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index e74e0d2e91..b034361aec 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -78,10 +78,11 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import PersistedEventPosition, RoomStreamToken +from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -1293,6 +1294,126 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): get_last_event_pos_in_room_before_stream_ordering_txn, ) + async def bulk_get_last_event_pos_in_room_before_stream_ordering( + self, + room_ids: StrCollection, + end_token: RoomStreamToken, + ) -> Dict[str, int]: + """Bulk fetch the stream position of the latest events in the given + rooms + """ + + min_token = end_token.stream + max_token = end_token.get_max_stream_pos() + results: Dict[str, int] = {} + + # First, we check for the rooms in the stream change cache to see if we + # can just use the latest position from it. + missing_room_ids: Set[str] = set() + for room_id in room_ids: + stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id) + if stream_pos and stream_pos <= min_token: + results[room_id] = stream_pos + else: + missing_room_ids.add(room_id) + + # Next, we query the stream position from the DB. At first we fetch all + # positions less than the *max* stream pos in the token, then filter + # them down. We do this as a) this is a cheaper query, and b) the vast + # majority of rooms will have a latest token from before the min stream + # pos. + + def bulk_get_last_event_pos_txn( + txn: LoggingTransaction, batch_room_ids: StrCollection + ) -> Dict[str, int]: + # This query fetches the latest stream position in the rooms before + # the given max position. + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch_room_ids + ) + sql = f""" + SELECT room_id, ( + SELECT stream_ordering FROM events AS e + LEFT JOIN rejections USING (event_id) + WHERE e.room_id = r.room_id + AND stream_ordering <= ? + AND NOT outlier + AND rejection_reason IS NULL + ORDER BY stream_ordering DESC + LIMIT 1 + ) + FROM rooms AS r + WHERE {clause} + """ + txn.execute(sql, [max_token] + args) + return {row[0]: row[1] for row in txn} + + recheck_rooms: Set[str] = set() + for batched in batch_iter(missing_room_ids, 1000): + result = await self.db_pool.runInteraction( + "bulk_get_last_event_pos_in_room_before_stream_ordering", + bulk_get_last_event_pos_txn, + batched, + ) + + # Check that the stream position for the rooms are from before the + # minimum position of the token. If not then we need to fetch more + # rows. 
+ for room_id, stream in result.items(): + if stream <= min_token: + results[room_id] = stream + else: + recheck_rooms.add(room_id) + + if not recheck_rooms: + return results + + # For the remaining rooms we need to fetch all rows between the min and + # max stream positions in the end token, and filter out the rows that + # are after the end token. + # + # This query should be fast as the range between the min and max should + # be small. + + def bulk_get_last_event_pos_recheck_txn( + txn: LoggingTransaction, batch_room_ids: StrCollection + ) -> Dict[str, int]: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch_room_ids + ) + sql = f""" + SELECT room_id, instance_name, stream_ordering + FROM events + WHERE ? < stream_ordering AND stream_ordering <= ? + AND NOT outlier + AND rejection_reason IS NULL + AND {clause} + ORDER BY stream_ordering ASC + """ + txn.execute(sql, [min_token, max_token] + args) + + # We take the max stream ordering that is less than the token. Since + # we ordered by stream ordering we just need to iterate through and + # take the last matching stream ordering. + txn_results: Dict[str, int] = {} + for row in txn: + room_id = row[0] + event_pos = PersistedEventPosition(row[1], row[2]) + if not event_pos.persisted_after(end_token): + txn_results[room_id] = event_pos.stream + + return txn_results + + for batched in batch_iter(recheck_rooms, 1000): + recheck_result = await self.db_pool.runInteraction( + "bulk_get_last_event_pos_in_room_before_stream_ordering_recheck", + bulk_get_last_event_pos_recheck_txn, + batched, + ) + results.update(recheck_result) + + return results + async def get_current_room_stream_token_for_room_id( self, room_id: str ) -> RoomStreamToken: diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 91c335f85b..16fcb00206 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -327,7 +327,7 @@ class StreamChangeCache: for entity in r: self._entity_to_key.pop(entity, None) - def get_max_pos_of_last_change(self, entity: EntityType) -> int: + def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]: """Returns an upper bound of the stream id of the last change to an entity. @@ -335,7 +335,11 @@ class StreamChangeCache: entity: The entity to check. Return: - The stream position of the latest change for the given entity or - the earliest known stream position if the entitiy is unknown. + The stream position of the latest change for the given entity, if + known """ - return self._entity_to_key.get(entity, self._earliest_known_stream_pos) + return self._entity_to_key.get(entity) + + def get_earliest_known_position(self) -> int: + """Returns the earliest position in the cache.""" + return self._earliest_known_stream_pos diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index 5d38718a50..af1199ef8a 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3) self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4) - # Unknown entities will return the stream start position. 
- self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1) + # Unknown entities will return None + self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None) From 48c1307911e7123b458910c8ca9d5d85b811c502 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 23 Jul 2024 09:01:43 -0600 Subject: [PATCH 064/332] 1.112.0rc1 --- CHANGES.md | 58 +++++++++++++++++++++++++++++++++++++++ changelog.d/17231.bugfix | 1 - changelog.d/17387.doc | 1 - changelog.d/17416.feature | 1 - changelog.d/17418.feature | 1 - changelog.d/17419.feature | 1 - changelog.d/17423.doc | 1 - changelog.d/17424.misc | 1 - changelog.d/17426.misc | 1 - changelog.d/17428.bugfix | 1 - changelog.d/17429.feature | 1 - changelog.d/17432.feature | 1 - changelog.d/17433.feature | 1 - changelog.d/17434.bugfix | 1 - changelog.d/17435.bugfix | 1 - changelog.d/17438.bugfix | 1 - changelog.d/17439.bugfix | 1 - changelog.d/17449.bugfix | 1 - changelog.d/17451.doc | 1 - changelog.d/17453.misc | 1 - changelog.d/17454.feature | 1 - changelog.d/17458.misc | 1 - changelog.d/17460.misc | 1 - changelog.d/17461.misc | 1 - changelog.d/17468.misc | 1 - changelog.d/17469.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 28 files changed, 65 insertions(+), 26 deletions(-) delete mode 100644 changelog.d/17231.bugfix delete mode 100644 changelog.d/17387.doc delete mode 100644 changelog.d/17416.feature delete mode 100644 changelog.d/17418.feature delete mode 100644 changelog.d/17419.feature delete mode 100644 changelog.d/17423.doc delete mode 100644 changelog.d/17424.misc delete mode 100644 changelog.d/17426.misc delete mode 100644 changelog.d/17428.bugfix delete mode 100644 changelog.d/17429.feature delete mode 100644 changelog.d/17432.feature delete mode 100644 changelog.d/17433.feature delete mode 100644 changelog.d/17434.bugfix delete mode 100644 changelog.d/17435.bugfix delete mode 100644 changelog.d/17438.bugfix delete mode 100644 changelog.d/17439.bugfix delete mode 100644 changelog.d/17449.bugfix delete mode 100644 changelog.d/17451.doc delete mode 100644 changelog.d/17453.misc delete mode 100644 changelog.d/17454.feature delete mode 100644 changelog.d/17458.misc delete mode 100644 changelog.d/17460.misc delete mode 100644 changelog.d/17461.misc delete mode 100644 changelog.d/17468.misc delete mode 100644 changelog.d/17469.misc diff --git a/CHANGES.md b/CHANGES.md index 0a2b816ed1..51d8b177cf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,61 @@ +# Synapse 1.112.0rc1 (2024-07-23) + +### Features + +- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416)) +- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418)) +- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419)) +- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
([\#17429](https://github.com/element-hq/synapse/issues/17429)) +- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432)) +- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433)) +- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454)) + +### Bugfixes + +- Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) +- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) +- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) + +### Improved Documentation + +- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387)) +- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423)) +- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451)) + +### Internal Changes + +- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) +- Fix documentation on `RateLimiter#record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) +- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) +- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) +- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461)) +- Speed up sorting of the room list in sliding sync. 
([\#17468](https://github.com/element-hq/synapse/issues/17468)) +- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441)) +* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464)) +* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444)) +* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440)) +* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445)) +* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465)) +* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466)) +* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456)) +* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467)) +* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448)) +* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443)) +* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446)) +* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442)) +* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427)) + # Synapse 1.111.0 (2024-07-16) No significant changes since 1.111.0rc2. diff --git a/changelog.d/17231.bugfix b/changelog.d/17231.bugfix deleted file mode 100644 index d09b455654..0000000000 --- a/changelog.d/17231.bugfix +++ /dev/null @@ -1 +0,0 @@ -Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. diff --git a/changelog.d/17387.doc b/changelog.d/17387.doc deleted file mode 100644 index 82be10f135..0000000000 --- a/changelog.d/17387.doc +++ /dev/null @@ -1 +0,0 @@ -Update the readme image to have a white background, so that it is readable in dark mode. \ No newline at end of file diff --git a/changelog.d/17416.feature b/changelog.d/17416.feature deleted file mode 100644 index 1d119cf48f..0000000000 --- a/changelog.d/17416.feature +++ /dev/null @@ -1 +0,0 @@ -Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17418.feature b/changelog.d/17418.feature deleted file mode 100644 index c5e56bc500..0000000000 --- a/changelog.d/17418.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17419.feature b/changelog.d/17419.feature deleted file mode 100644 index 186a27c470..0000000000 --- a/changelog.d/17419.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
diff --git a/changelog.d/17423.doc b/changelog.d/17423.doc deleted file mode 100644 index 972bc659e4..0000000000 --- a/changelog.d/17423.doc +++ /dev/null @@ -1 +0,0 @@ -Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. diff --git a/changelog.d/17424.misc b/changelog.d/17424.misc deleted file mode 100644 index d4a81c137f..0000000000 --- a/changelog.d/17424.misc +++ /dev/null @@ -1 +0,0 @@ -Make sure we always use the right logic for enabling the media repo. diff --git a/changelog.d/17426.misc b/changelog.d/17426.misc deleted file mode 100644 index 886e5d4389..0000000000 --- a/changelog.d/17426.misc +++ /dev/null @@ -1 +0,0 @@ -Fix documentation on `RateLimiter#record_action`. \ No newline at end of file diff --git a/changelog.d/17428.bugfix b/changelog.d/17428.bugfix deleted file mode 100644 index cbfd7b0c39..0000000000 --- a/changelog.d/17428.bugfix +++ /dev/null @@ -1 +0,0 @@ -Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. \ No newline at end of file diff --git a/changelog.d/17429.feature b/changelog.d/17429.feature deleted file mode 100644 index 608b75d632..0000000000 --- a/changelog.d/17429.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17432.feature b/changelog.d/17432.feature deleted file mode 100644 index c86f04c118..0000000000 --- a/changelog.d/17432.feature +++ /dev/null @@ -1 +0,0 @@ -Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17433.feature b/changelog.d/17433.feature deleted file mode 100644 index ac9b5dee69..0000000000 --- a/changelog.d/17433.feature +++ /dev/null @@ -1 +0,0 @@ -Prepare for authenticated media freeze. \ No newline at end of file diff --git a/changelog.d/17434.bugfix b/changelog.d/17434.bugfix deleted file mode 100644 index c7cce52397..0000000000 --- a/changelog.d/17434.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. diff --git a/changelog.d/17435.bugfix b/changelog.d/17435.bugfix deleted file mode 100644 index 2d06a7c7fc..0000000000 --- a/changelog.d/17435.bugfix +++ /dev/null @@ -1 +0,0 @@ -Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). diff --git a/changelog.d/17438.bugfix b/changelog.d/17438.bugfix deleted file mode 100644 index cff6eecd48..0000000000 --- a/changelog.d/17438.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. diff --git a/changelog.d/17439.bugfix b/changelog.d/17439.bugfix deleted file mode 100644 index f36c3ec255..0000000000 --- a/changelog.d/17439.bugfix +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. \ No newline at end of file diff --git a/changelog.d/17449.bugfix b/changelog.d/17449.bugfix deleted file mode 100644 index cd847a3d1c..0000000000 --- a/changelog.d/17449.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary call to resume producing in fake channel. 
\ No newline at end of file diff --git a/changelog.d/17451.doc b/changelog.d/17451.doc deleted file mode 100644 index 357ac2c906..0000000000 --- a/changelog.d/17451.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. diff --git a/changelog.d/17453.misc b/changelog.d/17453.misc deleted file mode 100644 index 2978a52477..0000000000 --- a/changelog.d/17453.misc +++ /dev/null @@ -1 +0,0 @@ -Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. diff --git a/changelog.d/17454.feature b/changelog.d/17454.feature deleted file mode 100644 index bb088371bf..0000000000 --- a/changelog.d/17454.feature +++ /dev/null @@ -1 +0,0 @@ -Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17458.misc b/changelog.d/17458.misc deleted file mode 100644 index 09cce15d0d..0000000000 --- a/changelog.d/17458.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up generating sliding sync responses. diff --git a/changelog.d/17460.misc b/changelog.d/17460.misc deleted file mode 100644 index fd99da5a95..0000000000 --- a/changelog.d/17460.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. diff --git a/changelog.d/17461.misc b/changelog.d/17461.misc deleted file mode 100644 index 80f7144baa..0000000000 --- a/changelog.d/17461.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up fetching room keys from backup. diff --git a/changelog.d/17468.misc b/changelog.d/17468.misc deleted file mode 100644 index d908776204..0000000000 --- a/changelog.d/17468.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sorting of the room list in sliding sync. diff --git a/changelog.d/17469.misc b/changelog.d/17469.misc deleted file mode 100644 index ba0419355b..0000000000 --- a/changelog.d/17469.misc +++ /dev/null @@ -1 +0,0 @@ -Implement handling of `$ME` as a state key in sliding sync. diff --git a/debian/changelog b/debian/changelog index 0470e25f2d..5209b9f5fd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium + + * New Synapse release 1.112.0rc1. + + -- Synapse Packaging team Tue, 23 Jul 2024 08:58:55 -0600 + matrix-synapse-py3 (1.111.0) stable; urgency=medium * New Synapse release 1.111.0. diff --git a/pyproject.toml b/pyproject.toml index 50dc0c3c63..0b5dc418e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0" +version = "1.112.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From e8ee784c75d161d1634a60b7c4e419e8f08273b8 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 23 Jul 2024 09:14:45 -0600 Subject: [PATCH 065/332] Address changelog review comments --- CHANGES.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 51d8b177cf..f869674ace 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,13 +12,10 @@ ### Bugfixes -- Added configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. 
([\#17231](https://github.com/element-hq/synapse/issues/17231)) -- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) - Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) - Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) - Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) -- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) -- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) ### Improved Documentation @@ -29,7 +26,10 @@ ### Internal Changes - Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) -- Fix documentation on `RateLimiter#record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) - Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) - Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) - Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) From 4b9f4c2abf6352473f1aac911ae42f283495d676 Mon Sep 17 00:00:00 2001 From: Maciej Laskowski Date: Wed, 24 Jul 2024 11:32:56 +0200 Subject: [PATCH 066/332] Update debian template - new link to the delegation docs (#17475) Update debian template - new link to the delegation docs --- debian/templates | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/templates b/debian/templates index cab05715d0..7bfd3c2e9f 100644 --- a/debian/templates +++ b/debian/templates @@ -5,7 +5,7 @@ _Description: Name of the server: servers via federation. This is normally the public hostname of the server running synapse, but can be different if you set up delegation. 
Please refer to the delegation documentation in this case: - https://github.com/element-hq/synapse/blob/master/docs/delegate.md. + https://element-hq.github.io/synapse/latest/delegate.html. Template: matrix-synapse/report-stats Type: boolean From 8bbc98e66d5b60676e24b0ed7126938396040ab9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2024 11:47:25 +0100 Subject: [PATCH 067/332] Use a new token format for sliding sync (#17452) This is in preparation for adding per-connection state. --------- Co-authored-by: Eric Eastwood --- changelog.d/17452.misc | 1 + synapse/handlers/sliding_sync.py | 30 ++- synapse/rest/client/sync.py | 6 +- synapse/types/__init__.py | 43 +++ synapse/types/handlers/__init__.py | 13 +- tests/rest/client/test_sync.py | 416 ++++++++++++++++------------- 6 files changed, 301 insertions(+), 208 deletions(-) create mode 100644 changelog.d/17452.misc diff --git a/changelog.d/17452.misc b/changelog.d/17452.misc new file mode 100644 index 0000000000..4fd07f617b --- /dev/null +++ b/changelog.d/17452.misc @@ -0,0 +1 @@ +Change sliding sync to use their own token format in preparation for storing per-connection state. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 554ab59bf3..36665db8e1 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -49,6 +49,7 @@ from synapse.types import ( PersistedEventPosition, Requester, RoomStreamToken, + SlidingSyncStreamToken, StateMap, StreamKeyType, StreamToken, @@ -362,7 +363,7 @@ class SlidingSyncHandler: self, requester: Requester, sync_config: SlidingSyncConfig, - from_token: Optional[StreamToken] = None, + from_token: Optional[SlidingSyncStreamToken] = None, timeout_ms: int = 0, ) -> SlidingSyncResult: """ @@ -393,7 +394,7 @@ class SlidingSyncHandler: # this returns false, it means we timed out waiting, and we should # just return an empty response. before_wait_ts = self.clock.time_msec() - if not await self.notifier.wait_for_stream_token(from_token): + if not await self.notifier.wait_for_stream_token(from_token.stream_token): logger.warning( "Timed out waiting for worker to catch up. 
Returning empty response" ) @@ -431,7 +432,7 @@ class SlidingSyncHandler: sync_config.user.to_string(), timeout_ms, current_sync_callback, - from_token=from_token, + from_token=from_token.stream_token, ) return result @@ -440,7 +441,7 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, - from_token: Optional[StreamToken] = None, + from_token: Optional[SlidingSyncStreamToken] = None, ) -> SlidingSyncResult: """ Generates the response body of a Sliding Sync result, represented as a @@ -473,7 +474,7 @@ class SlidingSyncHandler: await self.get_room_membership_for_user_at_to_token( user=sync_config.user, to_token=to_token, - from_token=from_token, + from_token=from_token.stream_token if from_token else None, ) ) @@ -631,8 +632,11 @@ class SlidingSyncHandler: to_token=to_token, ) + # TODO: Update this when we implement per-connection state + connection_token = 0 + return SlidingSyncResult( - next_pos=to_token, + next_pos=SlidingSyncStreamToken(to_token, connection_token), lists=lists, rooms=rooms, extensions=extensions, @@ -1367,7 +1371,7 @@ class SlidingSyncHandler: room_id: str, room_sync_config: RoomSyncConfig, room_membership_for_user_at_to_token: _RoomMembershipForUser, - from_token: Optional[StreamToken], + from_token: Optional[SlidingSyncStreamToken], to_token: StreamToken, ) -> SlidingSyncResult.RoomResult: """ @@ -1431,7 +1435,7 @@ class SlidingSyncHandler: # - TODO: For an incremental sync where we haven't sent it down this # connection before to_bound = ( - from_token.room_key + from_token.stream_token.room_key if from_token is not None and not room_membership_for_user_at_to_token.newly_joined else None @@ -1498,7 +1502,9 @@ class SlidingSyncHandler: instance_name=timeline_event.internal_metadata.instance_name, stream=timeline_event.internal_metadata.stream_ordering, ) - if persisted_position.persisted_after(from_token.room_key): + if persisted_position.persisted_after( + from_token.stream_token.room_key + ): num_live += 1 else: # Since we're iterating over the timeline events in @@ -1786,7 +1792,7 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: Optional[SlidingSyncStreamToken], ) -> SlidingSyncResult.Extensions: """Handle extension requests. 
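
On the wire, the new `SlidingSyncStreamToken` (defined in full further down) is simply the connection position and the standard stream token joined by a slash, e.g. `5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379`. A minimal standalone sketch of the parse, which mirrors `SlidingSyncStreamToken.from_string` but treats the inner stream token as an opaque string rather than resolving it against the datastore:

```python
from typing import Tuple


def parse_sliding_sync_token(string: str) -> Tuple[int, str]:
    """Split "<connection_position>/<stream_token>" into its two parts."""
    # Split on the *first* "/" only: the inner stream token is free to
    # contain further separators of its own.
    connection_position_str, stream_token_str = string.split("/", 1)
    return int(connection_position_str), stream_token_str


pos, stream = parse_sliding_sync_token(
    "5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379"
)
assert pos == 5
assert stream == "s2633508_17_338_6732159_1082514_541479_274711_265584_1_379"

# Re-serialising is the inverse join, as in SlidingSyncStreamToken.to_string:
assert f"{pos}/{stream}" == (
    "5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379"
)
```

Splitting on only the first `/` is what allows the embedded stream token to keep its own internal structure unchanged.
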
@@ -1900,7 +1906,7 @@ class SlidingSyncHandler: sync_config: SlidingSyncConfig, e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, to_token: StreamToken, - from_token: Optional[StreamToken], + from_token: Optional[SlidingSyncStreamToken], ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: """Handle E2EE device extension (MSC3884) @@ -1922,7 +1928,7 @@ class SlidingSyncHandler: # TODO: This should take into account the `from_token` and `to_token` device_list_updates = await self.device_handler.get_user_ids_changed( user_id=user_id, - from_token=from_token, + from_token=from_token.stream_token, ) device_one_time_keys_count: Mapping[str, int] = {} diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 93fe1d439e..d72dfa2b10 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -54,7 +54,7 @@ from synapse.http.servlet import ( from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace_with_opname from synapse.rest.admin.experimental_features import ExperimentalFeature -from synapse.types import JsonDict, Requester, StreamToken +from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken from synapse.types.rest.client import SlidingSyncBody from synapse.util import json_decoder from synapse.util.caches.lrucache import LruCache @@ -889,7 +889,9 @@ class SlidingSyncRestServlet(RestServlet): from_token = None if from_token_string is not None: - from_token = await StreamToken.from_string(self.store, from_token_string) + from_token = await SlidingSyncStreamToken.from_string( + self.store, from_token_string + ) # TODO: We currently don't know whether we're going to use sticky params or # maybe some filters like sync v2 where they are built up once and referenced diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index c0d30ac2a3..5259550f1c 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -1160,6 +1160,49 @@ StreamToken.START = StreamToken( ) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SlidingSyncStreamToken: + """The same as a `StreamToken`, but includes an extra field at the start for + the sliding sync connection token (separated by a '/'). This is used to + store per-connection state. + + This then looks something like: + 5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379 + + Attributes: + stream_token: Token representing the position of all the standard + streams. + connection_position: Token used by sliding sync to track updates to any + per-connection state stored by Synapse. 
+ """ + + stream_token: StreamToken + connection_position: int + + @staticmethod + @cancellable + async def from_string(store: "DataStore", string: str) -> "SlidingSyncStreamToken": + """Creates a SlidingSyncStreamToken from its textual representation.""" + try: + connection_position_str, stream_token_str = string.split("/", 1) + connection_position = int(connection_position_str) + stream_token = await StreamToken.from_string(store, stream_token_str) + + return SlidingSyncStreamToken( + stream_token=stream_token, + connection_position=connection_position, + ) + except CancelledError: + raise + except Exception: + raise SynapseError(400, "Invalid stream token") + + async def to_string(self, store: "DataStore") -> str: + """Serializes the token to a string""" + stream_token_str = await self.stream_token.to_string(store) + return f"{self.connection_position}/{stream_token_str}" + + @attr.s(slots=True, frozen=True, auto_attribs=True) class PersistedPosition: """Position of a newly persisted row with instance that persisted it.""" diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 4c6c42db04..59eb0963ee 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -31,7 +31,14 @@ else: from pydantic import Extra from synapse.events import EventBase -from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, StreamToken, UserID +from synapse.types import ( + DeviceListUpdates, + JsonDict, + JsonMapping, + SlidingSyncStreamToken, + StreamToken, + UserID, +) from synapse.types.rest.client import SlidingSyncBody if TYPE_CHECKING: @@ -329,7 +336,7 @@ class SlidingSyncResult: def __bool__(self) -> bool: return bool(self.to_device or self.e2ee) - next_pos: StreamToken + next_pos: SlidingSyncStreamToken lists: Dict[str, SlidingWindowList] rooms: Dict[str, RoomResult] extensions: Extensions @@ -342,7 +349,7 @@ class SlidingSyncResult: return bool(self.lists or self.rooms or self.extensions) @staticmethod - def empty(next_pos: StreamToken) -> "SlidingSyncResult": + def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": "Return a new empty result" return SlidingSyncResult( next_pos=next_pos, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 2628869de6..65c5f8ccae 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -21,7 +21,7 @@ import json import logging from http import HTTPStatus -from typing import Any, Dict, Iterable, List +from typing import Any, Dict, Iterable, List, Optional, Tuple from parameterized import parameterized, parameterized_class @@ -50,7 +50,14 @@ from synapse.rest.client import ( sync, ) from synapse.server import HomeServer -from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID +from synapse.types import ( + JsonDict, + RoomStreamToken, + SlidingSyncStreamToken, + StreamKeyType, + StreamToken, + UserID, +) from synapse.util import Clock from tests import unittest @@ -1225,7 +1232,43 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase): self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"]) -class SlidingSyncTestCase(unittest.HomeserverTestCase): +class SlidingSyncBase(unittest.HomeserverTestCase): + """Base class for sliding sync test cases""" + + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + + def do_sync( + self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + ) -> Tuple[JsonDict, str]: + """Do a sliding sync request with 
given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + + sync_path = self.sync_endpoint + if since: + sync_path += f"?pos={since}" + + channel = self.make_request( + method="POST", + path=sync_path, + content=sync_body, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + return channel.json_body, channel.json_body["pos"] + + +class SlidingSyncTestCase(SlidingSyncBase): """ Tests regarding MSC3575 Sliding Sync `/sync` endpoint. """ @@ -1245,10 +1288,6 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): return config def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.sync_endpoint = ( - "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" - ) self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() @@ -1496,7 +1535,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) future_position_token_serialized = self.get_success( - future_position_token.to_string(self.store) + SlidingSyncStreamToken(future_position_token, 0).to_string(self.store) ) # Make the Sliding Sync request @@ -1544,23 +1583,22 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id, user1_id, tok=user1_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": 1, - } - } - }, + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -2771,7 +2809,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1, "activity before token2", tok=user2_tok ) - from_token = self.event_sources.get_current_token() + # The `timeline_limit` is set to 4 so we can at least see one historical event + # before the `from_token`. We should see historical events because this is a + # `newly_joined` room. + timeline_limit = 4 + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": timeline_limit, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Join the room after the `from_token` which will make us consider this room as # `newly_joined`. @@ -2786,24 +2837,11 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1, "activity after token4", tok=user2_tok ) - # The `timeline_limit` is set to 4 so we can at least see one historical event - # before the `from_token`. We should see historical events because this is a - # `newly_joined` room. 
- timeline_limit = 4 # Make an incremental Sliding Sync request (what we're trying to test) channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -2980,7 +3018,16 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after invite3", tok=user2_tok) self.helper.send(room_id1, "activity after invite4", tok=user2_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after token5", tok=user2_tok) self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) @@ -2988,17 +3035,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3237,7 +3275,17 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after invite3", tok=user2_tok) self.helper.send(room_id1, "activity after invite4", tok=user2_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # Large enough to see the latest events and before the invite + "timeline_limit": 4, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after token5", tok=user2_tok) self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) @@ -3245,18 +3293,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - # Large enough to see the latest events and before the invite - "timeline_limit": 4, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3402,7 +3440,16 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity before2", tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 4, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok) @@ -3418,17 +3465,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + 
f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3479,24 +3517,24 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after3", tok=user2_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 4, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after4", tok=user2_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3614,27 +3652,27 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) - after_room_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 0, + } + } + } + _, after_room_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(after_room_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Tombstone, ""], - ], - "timeline_limit": 0, - } - } - }, + self.sync_endpoint + f"?pos={after_room_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3966,7 +4004,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): user3_id = self.register_user("user3", "pass") user3_tok = self.login(user3_id, "pass") - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) @@ -4004,21 +4055,8 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # Make the Sliding Sync request with lazy loading for the room members channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, "*"], - ["org.matrix.foo_state", ""], - ], - "timeline_limit": 3, - } - } - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, 
channel.json_body) @@ -4468,7 +4506,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) -class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): +class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" servlets = [ @@ -4714,22 +4752,21 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): user2_id = self.register_user("u2", "pass") user2_tok = self.login(user2_id, "pass", "d2") - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - }, + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -4765,22 +4802,21 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - }, + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -4801,7 +4837,7 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): self._assert_to_device_response(channel, []) -class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): +class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): """Tests for the e2ee sliding sync extension""" servlets = [ @@ -4924,21 +4960,21 @@ class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make an incremental Sliding Sync request with the e2ee extension enabled channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -4992,22 +5028,21 @@ class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): self.helper.join(room_id, user1_id, tok=user1_tok) self.helper.join(room_id, user3_id, tok=user3_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + 
f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - }, + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -5053,22 +5088,21 @@ class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - }, + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -5138,7 +5172,15 @@ class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): self.helper.join(room_id, user3_id, tok=user3_tok) self.helper.join(room_id, user4_id, tok=user4_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Have user3 update their device list channel = self.make_request( @@ -5157,16 +5199,8 @@ class SlidingSyncE2eeExtensionTestCase(unittest.HomeserverTestCase): # Make an incremental Sliding Sync request with the e2ee extension enabled channel = self.make_request( "POST", - self.sync_endpoint - + f"?pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - }, + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) From bdf37ad4c4d66c7a2ca69a29542b01e0856cff48 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2024 15:21:56 +0100 Subject: [PATCH 068/332] Sliding Sync: ensure bump stamp ignores backfilled events (#17478) Backfill events have a negative stream ordering, and so its not useful to use to compare with other (positive) stream orderings. Plus, the Rust SDK currently assumes `bump_stamp` is positive. --- changelog.d/17478.misc | 1 + synapse/handlers/sliding_sync.py | 10 ++- tests/rest/client/test_sync.py | 122 ++++++++++++++++++++++++++++++- 3 files changed, 130 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17478.misc diff --git a/changelog.d/17478.misc b/changelog.d/17478.misc new file mode 100644 index 0000000000..5406c82742 --- /dev/null +++ b/changelog.d/17478.misc @@ -0,0 +1 @@ +Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. 
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 36665db8e1..f1f6f30b95 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -1758,8 +1758,14 @@ class SlidingSyncHandler: bump_stamp = room_membership_for_user_at_to_token.event_pos.stream # But if we found a bump event, use that instead if last_bump_event_result is not None: - _, bump_event_pos = last_bump_event_result - bump_stamp = bump_event_pos.stream + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream return SlidingSyncResult.RoomResult( name=room_name, diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 65c5f8ccae..6c73f4ec33 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -37,6 +37,7 @@ from synapse.api.constants import ( ReceiptTypes, RelationTypes, ) +from synapse.api.room_versions import RoomVersions from synapse.events import EventBase from synapse.handlers.sliding_sync import StateValues from synapse.rest.client import ( @@ -65,7 +66,7 @@ from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) from tests.server import FakeChannel, TimedOutException -from tests.test_utils.event_injection import mark_event_as_partial_state +from tests.test_utils.event_injection import create_event, mark_event_as_partial_state from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -2793,6 +2794,125 @@ class SlidingSyncTestCase(SlidingSyncBase): channel.json_body["rooms"][room_id2], ) + def test_rooms_bump_stamp_backfill(self) -> None: + """ + Test that `bump_stamp` ignores backfilled events, i.e. events with a + negative stream ordering. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote room + creator = "@user:other" + room_id = "!foo:other" + shared_kwargs = { + "room_id": room_id, + "room_version": "10", + } + + create_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[], + type=EventTypes.Create, + state_key="", + sender=creator, + **shared_kwargs, + ) + ) + creator_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[create_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Member, + state_key=creator, + content={"membership": Membership.JOIN}, + sender=creator, + **shared_kwargs, + ) + ) + # We add a message event as a valid "bump type" + msg_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[creator_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=creator, + **shared_kwargs, + ) + ) + invite_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[msg_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.INVITE}, + sender=creator, + **shared_kwargs, + ) + ) + + remote_events_and_contexts = [ + create_tuple, + creator_tuple, + msg_tuple, + invite_tuple, + ] + + # Ensure the local HS knows the room version + self.get_success( + self.store.store_room(room_id, creator, False, RoomVersions.V10) + ) + + # Persist these events as backfilled events. + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + + for event, context in remote_events_and_contexts: + self.get_success(persistence.persist_event(event, context, backfilled=True)) + + # Now we join the local user to the room + join_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[invite_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.JOIN}, + sender=user1_id, + **shared_kwargs, + ) + ) + self.get_success(persistence.persist_event(*join_tuple)) + + # Doing an SS request should return a positive `bump_stamp`, even though + # the only event that matches the bump types has as negative stream + # ordering. 
+ channel = self.make_request( + "POST", + self.sync_endpoint, + content={ + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertGreater(channel.json_body["rooms"][room_id]["bump_stamp"], 0) + def test_rooms_newly_joined_incremental_sync(self) -> None: """ Test that when we make an incremental sync with a `newly_joined` `rooms`, we are From 729026e604276f2816b45ac7511b8deba0df21fd Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Jul 2024 17:10:38 -0500 Subject: [PATCH 069/332] Sliding Sync: Add Account Data extension (MSC3959) (#17477) Extensions based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17477.feature | 1 + synapse/handlers/sliding_sync.py | 138 +++++ synapse/rest/client/sync.py | 19 +- synapse/types/handlers/__init__.py | 22 +- synapse/types/rest/client/__init__.py | 18 + tests/rest/client/test_sync.py | 786 ++++++++++++++++++++++++++ 6 files changed, 982 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17477.feature diff --git a/changelog.d/17477.feature b/changelog.d/17477.feature new file mode 100644 index 0000000000..9785a2ef7b --- /dev/null +++ b/changelog.d/17477.feature @@ -0,0 +1 @@ +Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index f1f6f30b95..3231574402 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -46,6 +46,7 @@ from synapse.storage.roommember import MemberSummary from synapse.types import ( DeviceListUpdates, JsonDict, + JsonMapping, PersistedEventPosition, Requester, RoomStreamToken, @@ -357,6 +358,7 @@ class SlidingSyncHandler: self.event_sources = hs.get_event_sources() self.relations_handler = hs.get_relations_handler() self.device_handler = hs.get_device_handler() + self.push_rules_handler = hs.get_push_rules_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync async def wait_for_sync_for_user( @@ -628,6 +630,7 @@ class SlidingSyncHandler: extensions = await self.get_extensions_response( sync_config=sync_config, + lists=lists, from_token=from_token, to_token=to_token, ) @@ -1797,6 +1800,7 @@ class SlidingSyncHandler: async def get_extensions_response( self, sync_config: SlidingSyncConfig, + lists: Dict[str, SlidingSyncResult.SlidingWindowList], to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], ) -> SlidingSyncResult.Extensions: @@ -1804,6 +1808,7 @@ class SlidingSyncHandler: Args: sync_config: Sync configuration + lists: Sliding window API. A map of list key to list results. to_token: The point in the stream to sync up to. from_token: The point in the stream to sync from. 
""" @@ -1828,9 +1833,20 @@ class SlidingSyncHandler: from_token=from_token, ) + account_data_response = None + if sync_config.extensions.account_data is not None: + account_data_response = await self.get_account_data_extension_response( + sync_config=sync_config, + lists=lists, + account_data_request=sync_config.extensions.account_data, + to_token=to_token, + from_token=from_token, + ) + return SlidingSyncResult.Extensions( to_device=to_device_response, e2ee=e2ee_response, + account_data=account_data_response, ) async def get_to_device_extension_response( @@ -1956,3 +1972,125 @@ class SlidingSyncHandler: device_one_time_keys_count=device_one_time_keys_count, device_unused_fallback_key_types=device_unused_fallback_key_types, ) + + async def get_account_data_extension_response( + self, + sync_config: SlidingSyncConfig, + lists: Dict[str, SlidingSyncResult.SlidingWindowList], + account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], + ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]: + """Handle Account Data extension (MSC3959) + + Args: + sync_config: Sync configuration + lists: Sliding window API. A map of list key to list results. + account_data_request: The account_data extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. + """ + user_id = sync_config.user.to_string() + + # Skip if the extension is not enabled + if not account_data_request.enabled: + return None + + global_account_data_map: Mapping[str, JsonMapping] = {} + if from_token is not None: + global_account_data_map = ( + await self.store.get_updated_global_account_data_for_user( + user_id, from_token.stream_token.account_data_key + ) + ) + + have_push_rules_changed = await self.store.have_push_rules_changed_for_user( + user_id, from_token.stream_token.push_rules_key + ) + if have_push_rules_changed: + global_account_data_map = dict(global_account_data_map) + global_account_data_map[AccountDataTypes.PUSH_RULES] = ( + await self.push_rules_handler.push_rules_for_user(sync_config.user) + ) + else: + all_global_account_data = await self.store.get_global_account_data_for_user( + user_id + ) + + global_account_data_map = dict(all_global_account_data) + global_account_data_map[AccountDataTypes.PUSH_RULES] = ( + await self.push_rules_handler.push_rules_for_user(sync_config.user) + ) + + # We only want to include account data for rooms that are already in the sliding + # sync response AND that were requested in the account data request. 
+ relevant_room_ids: Set[str] = set()
+
+ # See what rooms from the room subscriptions we should get account data for
+ if (
+ account_data_request.rooms is not None
+ and sync_config.room_subscriptions is not None
+ ):
+ actual_room_ids = sync_config.room_subscriptions.keys()
+
+ for room_id in account_data_request.rooms:
+ # A wildcard means we process all rooms from the room subscriptions
+ if room_id == "*":
+ relevant_room_ids.update(sync_config.room_subscriptions.keys())
+ break
+
+ if room_id in actual_room_ids:
+ relevant_room_ids.add(room_id)
+
+ # See what rooms from the sliding window lists we should get account data for
+ if account_data_request.lists is not None:
+ for list_key in account_data_request.lists:
+ # Just some typing because we share the variable name in multiple places
+ actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None
+
+ # A wildcard means we process rooms from all lists
+ if list_key == "*":
+ for actual_list in lists.values():
+ # We only expect a single SYNC operation for any list
+ assert len(actual_list.ops) == 1
+ sync_op = actual_list.ops[0]
+ assert sync_op.op == OperationType.SYNC
+
+ relevant_room_ids.update(sync_op.room_ids)
+
+ break
+
+ actual_list = lists.get(list_key)
+ if actual_list is not None:
+ # We only expect a single SYNC operation for any list
+ assert len(actual_list.ops) == 1
+ sync_op = actual_list.ops[0]
+ assert sync_op.op == OperationType.SYNC
+
+ relevant_room_ids.update(sync_op.room_ids)
+
+ # Fetch room account data
+ account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {}
+ if len(relevant_room_ids) > 0:
+ if from_token is not None:
+ account_data_by_room_map = (
+ await self.store.get_updated_room_account_data_for_user(
+ user_id, from_token.stream_token.account_data_key
+ )
+ )
+ else:
+ account_data_by_room_map = (
+ await self.store.get_room_account_data_for_user(user_id)
+ )
+
+ # Filter down to the relevant rooms
+ account_data_by_room_map = {
+ room_id: account_data_map
+ for room_id, account_data_map in account_data_by_room_map.items()
+ if room_id in relevant_room_ids
+ }
+
+ return SlidingSyncResult.Extensions.AccountDataExtension(
+ global_account_data_map=global_account_data_map,
+ account_data_by_room_map=account_data_by_room_map,
+ )
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index d72dfa2b10..7cf1f56435 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -929,7 +929,6 @@ class SlidingSyncRestServlet(RestServlet):
 
         return 200, response_content
 
-    # TODO: Is there a better way to encode things?
     async def encode_response(
         self,
         requester: Requester,
@@ -1117,6 +1116,24 @@ class SlidingSyncRestServlet(RestServlet):
                 extensions.e2ee.device_list_updates.left
             )
 
+        if extensions.account_data is not None:
+            serialized_extensions["account_data"] = {
+                # Same as the top-level `account_data.events` field in Sync v2.
+                "global": [
+                    {"type": account_data_type, "content": content}
+                    for account_data_type, content in extensions.account_data.global_account_data_map.items()
+                ],
+                # Same as the joined room's account_data field in Sync v2, e.g. the path
+                # `rooms.join["!foo:bar"].account_data.events`.
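+                # The serialized shape looks like, for example (illustrative,
+                # with placeholder type/content values):
+                #     "rooms": {
+                #         "!foo:bar": [
+                #             {"type": "org.example.custom", "content": {"key": "value"}}
+                #         ]
+                #     }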
+ "rooms": { + room_id: [ + {"type": account_data_type, "content": content} + for account_data_type, content in event_map.items() + ] + for room_id, event_map in extensions.account_data.account_data_by_room_map.items() + }, + } + return serialized_extensions diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 59eb0963ee..479222a18d 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -330,11 +330,31 @@ class SlidingSyncResult: or self.device_unused_fallback_key_types ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class AccountDataExtension: + """The Account Data extension (MSC3959) + + Attributes: + global_account_data_map: Mapping from `type` to `content` of global account + data events. + account_data_by_room_map: Mapping from room_id to mapping of `type` to + `content` of room account data events. + """ + + global_account_data_map: Mapping[str, JsonMapping] + account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] + + def __bool__(self) -> bool: + return bool( + self.global_account_data_map or self.account_data_by_room_map + ) + to_device: Optional[ToDeviceExtension] = None e2ee: Optional[E2eeExtension] = None + account_data: Optional[AccountDataExtension] = None def __bool__(self) -> bool: - return bool(self.to_device or self.e2ee) + return bool(self.to_device or self.e2ee or self.account_data) next_pos: SlidingSyncStreamToken lists: Dict[str, SlidingWindowList] diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index f3c45a0d6a..34e07ddac5 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -322,8 +322,26 @@ class SlidingSyncBody(RequestBodyModel): enabled: Optional[StrictBool] = False + class AccountDataExtension(RequestBodyModel): + """The Account Data extension (MSC3959) + + Attributes: + enabled + lists: List of list keys (from the Sliding Window API) to apply this + extension to. + rooms: List of room IDs (from the Room Subscription API) to apply this + extension to. + """ + + enabled: Optional[StrictBool] = False + # Process all lists defined in the Sliding Window API. (This is the default.) + lists: Optional[List[StrictStr]] = ["*"] + # Process all room subscriptions defined in the Room Subscription API. (This is the default.) 
+ rooms: Optional[List[StrictStr]] = ["*"]
+
 to_device: Optional[ToDeviceExtension] = None
 e2ee: Optional[E2eeExtension] = None
+ account_data: Optional[AccountDataExtension] = None
 
 # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
 if TYPE_CHECKING:
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 6c73f4ec33..135b677bad 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -5458,3 +5458,789 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
 ),
 ["alg1"],
 )
+
+
+class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
+ """Tests for the account_data sliding sync extension"""
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ sync.register_servlets,
+ sendtodevice.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.account_data_handler = hs.get_account_data_handler()
+ self.notifier = hs.get_notifier()
+ self.sync_endpoint = (
+ "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
+ )
+
+ def _bump_notifier_wait_for_events(self, user_id: str) -> None:
+ """
+ Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
+ Sync results.
+ """
+ # We're expecting some new activity from this point onwards
+ from_token = self.event_sources.get_current_token()
+
+ triggered_notifier_wait_for_events = False
+
+ async def _on_new_acivity(
+ before_token: StreamToken, after_token: StreamToken
+ ) -> bool:
+ nonlocal triggered_notifier_wait_for_events
+ triggered_notifier_wait_for_events = True
+ return True
+
+ # Listen for some new activity for the user. We're just trying to confirm that
+ # our bump below actually does what we think it does (triggers new activity for
+ # the user).
+ result_awaitable = self.notifier.wait_for_events(
+ user_id,
+ 1000,
+ _on_new_acivity,
+ from_token=from_token,
+ )
+
+ # Send a new To-Device message so that `notifier.wait_for_events(...)` wakes up.
+ # We're bumping to-device because it won't show up in the Sliding Sync response
+ # for this extension so it won't affect whether we have results.
+ sending_user_id = self.register_user(
+ "user_bump_notifier_wait_for_events", "pass"
+ )
+ sending_user_tok = self.login(sending_user_id, "pass")
+ test_msg = {"foo": "bar"}
+ chan = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/sendToDevice/m.test/1234",
+ content={"messages": {user_id: {"d1": test_msg}}},
+ access_token=sending_user_tok,
+ )
+ self.assertEqual(chan.code, 200, chan.result)
+
+ # Wait for our notifier result
+ self.get_success(result_awaitable)
+
+ if not triggered_notifier_wait_for_events:
+ raise AssertionError(
+ "Expected `notifier.wait_for_events(...)` to be triggered"
+ )
+
+ def test_no_data_initial_sync(self) -> None:
+ """
+ Test that enabling the account_data extension works during an initial sync,
+ even if there is no data.
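+
+ Even with no user-set account data, the response should still include the
+ server-default push rules under `global`, e.g. (illustrative shape):
+
+     "extensions": {
+         "account_data": {
+             "global": [{"type": "m.push_rules", "content": {...}}],
+             "rooms": {},
+         }
+     }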
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Make an initial Sliding Sync request with the account_data extension enabled + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + # Even though we don't have any global account data set, Synapse saves some + # default push rules for us. + {AccountDataTypes.PUSH_RULES}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_no_data_incremental_sync(self) -> None: + """ + Test that enabling account_data extension works during an incremental sync, even + if there is no-data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # There has been no account data changes since the `from_token` so we shouldn't + # see any account data here. + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + set(), + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_global_account_data_initial_sync(self) -> None: + """ + On initial sync, we should return all global account data on initial sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Update the global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.foobarbaz", + content={"foo": "bar"}, + ) + ) + + # Make an initial Sliding Sync request with the account_data extension enabled + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # It should show us all of the global account data + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_global_account_data_incremental_sync(self) -> None: + """ + On incremental sync, we should only account data that has changed since the + `from_token`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Add some global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.foobarbaz", + content={"foo": "bar"}, + ) + ) + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some other global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.doodardaz", + content={"doo": "dar"}, + ) + ) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + # We should only see the new global account data that happened after the `from_token` + {"org.matrix.doodardaz"}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_room_account_data_initial_sync(self) -> None: + """ + On initial sync, we return all account data for a given room but only for + rooms that we request and are being returned in the Sliding Sync response. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Make an initial Sliding Sync request with the account_data extension enabled + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + self.assertIncludes( + { + event["type"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + }, + {"org.matrix.roorarraz"}, + exact=True, + ) + + def test_room_account_data_incremental_sync(self) -> None: + """ + On incremental sync, we return all account data for a given room but only for + rooms that we request and are being returned in the Sliding Sync response. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # We should only see the new room account data that happened after the `from_token` + self.assertIncludes( + { + event["type"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + }, + {"org.matrix.roorarraz2"}, + exact=True, + ) + + def test_room_account_data_relevant_rooms(self) -> None: + """ + Test out different variations of `lists`/`rooms` we are requesting account data for. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id3, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id4, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id5, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + room_id_to_human_name_map = { + room_id1: "room1", + room_id2: "room2", + room_id3: "room3", + room_id4: "room4", + room_id5: "room5", + } + + # Mix lists and rooms + sync_body = { + "lists": { + # We expect this list range to include room5 and room4 + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + # We expect this list range to include room5, room4, room3 + "bar-list": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + }, + }, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "lists": ["foo-list", "non-existent-list"], + "rooms": [room_id1, room_id2, "!non-existent-room"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Requested via `rooms` and a room subscription exists + # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions) + # room3: ❌ Not requested + # room4: ✅ Shows up because requested via `lists` and list exists in the response + # room5: ✅ Shows up because requested via `lists` and list exists in the response + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"]["account_data"] + .get("rooms") + .keys() + }, + {"room1", "room4", "room5"}, + exact=True, + ) + + # Try wildcards (this is the default) + sync_body = { + "lists": { + # We expect this list range to include room5 and room4 + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + # We expect this list range to include room5, room4, room3 + "bar-list": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + }, + }, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + 
"enabled": True, + # "lists": ["*"], + # "rooms": ["*"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"]["account_data"] + .get("rooms") + .keys() + }, + {"room1", "room3", "room4", "room5"}, + exact=True, + ) + + # Empty list will return nothing + sync_body = { + "lists": { + # We expect this list range to include room5 and room4 + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + # We expect this list range to include room5, room4, room3 + "bar-list": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + }, + }, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "lists": [], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ❌ Not requested + # room4: ❌ Not requested + # room5: ❌ Not requested + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"]["account_data"] + .get("rooms") + .keys() + }, + set(), + exact=True, + ) + + # Try wildcard and none + sync_body = { + "lists": { + # We expect this list range to include room5 and room4 + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + # We expect this list range to include room5, room4, room3 + "bar-list": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + }, + }, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "lists": ["*"], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"]["account_data"] + .get("rooms") + .keys() + }, + {"room3", "room4", "room5"}, + exact=True, + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the account_data extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the global account data to trigger new results + self.get_success( + self.account_data_handler.add_account_data_for_user( + user1_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the global account data update + self.assertIncludes( + { + global_event["type"] + for global_event in channel.json_body["extensions"]["account_data"].get( + "global" + ) + }, + {"org.matrix.foobarbaz"}, + exact=True, + ) + self.assertIncludes( + channel.json_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the account_data extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. 
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertIsNotNone( + channel.json_body["extensions"]["account_data"].get("global") + ) + self.assertIsNotNone( + channel.json_body["extensions"]["account_data"].get("rooms") + ) From 69ac4b6a6e30f41e4238938a554c5f98a614d77f Mon Sep 17 00:00:00 2001 From: YLong Shi Date: Thu, 25 Jul 2024 19:07:44 +0800 Subject: [PATCH 070/332] Update config_documentation - Change example of msisdn in allowed_local_3pids (#17476) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17476.doc | 1 + docs/usage/configuration/config_documentation.md | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17476.doc diff --git a/changelog.d/17476.doc b/changelog.d/17476.doc new file mode 100644 index 0000000000..89d8d490bb --- /dev/null +++ b/changelog.d/17476.doc @@ -0,0 +1 @@ +Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 649f4f71c7..40f64be856 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2386,7 +2386,7 @@ enable_registration_without_verification: true --- ### `registrations_require_3pid` -If this is set, users must provide all of the specified types of 3PID when registering an account. +If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account. Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. @@ -2411,6 +2411,9 @@ disable_msisdn_registration: true Mandate that users are only allowed to associate certain formats of 3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options. +`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re). + +More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types). Example configuration: ```yaml @@ -2420,7 +2423,7 @@ allowed_local_3pids: - medium: email pattern: '^[^@]+@vector\.im$' - medium: msisdn - pattern: '\+44' + pattern: '^44\d{10}$' ``` --- ### `enable_3pid_lookup` From ebbabfe7827552cf71a1a8364c83d4aabb043df9 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 25 Jul 2024 10:43:35 -0500 Subject: [PATCH 071/332] Refactor Sliding Sync tests to better utilize the `SlidingSyncBase` (pt. 
1) (#17481)

`SlidingSyncBase` for tests was first introduced in
https://github.com/element-hq/synapse/pull/17452

Part 2: https://github.com/element-hq/synapse/pull/17482
---
 changelog.d/17481.misc         |   1 +
 tests/rest/client/test_sync.py | 334 +++++++++++----------------------
 2 files changed, 107 insertions(+), 228 deletions(-)
 create mode 100644 changelog.d/17481.misc

diff --git a/changelog.d/17481.misc b/changelog.d/17481.misc
new file mode 100644
index 0000000000..ac55538424
--- /dev/null
+++ b/changelog.d/17481.misc
@@ -0,0 +1 @@
+Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`.
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 135b677bad..a9f2b274aa 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -21,7 +21,7 @@
 import json
 import logging
 from http import HTTPStatus
-from typing import Any, Dict, Iterable, List, Optional, Tuple
+from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple
 
 from parameterized import parameterized, parameterized_class
 
@@ -60,6 +60,7 @@ from synapse.types import (
 UserID,
 )
 from synapse.util import Clock
+from synapse.util.stringutils import random_string
 
 from tests import unittest
 from tests.federation.transport.test_knocking import (
@@ -1238,6 +1239,12 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
 
 sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
 
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Enable sliding sync
+ config["experimental_features"] = {"msc3575_enabled": True}
+ return config
+
 def do_sync(
 self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
 ) -> Tuple[JsonDict, str]:
@@ -1268,6 +1275,88 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
 
 return channel.json_body, channel.json_body["pos"]
 
+ def _bump_notifier_wait_for_events(
+ self,
+ user_id: str,
+ wake_stream_key: Literal[
+ StreamKeyType.ACCOUNT_DATA,
+ StreamKeyType.PRESENCE,
+ ],
+ ) -> None:
+ """
+ Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
+ Sync results.
+
+ Args:
+ user_id: The user ID to wake up the notifier for
+ wake_stream_key: The stream key to wake up. This will create an actual new
+ entity in that stream so it's best to choose one that won't affect the
+ Sliding Sync results you're testing for. In other words, if you're testing
+ account data, choose `StreamKeyType.PRESENCE` instead. We support two
+ possible stream keys because you're probably testing one or the other so
+ one is always a "safe" option.
+ """
+ # We're expecting some new activity from this point onwards
+ from_token = self.hs.get_event_sources().get_current_token()
+
+ triggered_notifier_wait_for_events = False
+
+ async def _on_new_acivity(
+ before_token: StreamToken, after_token: StreamToken
+ ) -> bool:
+ nonlocal triggered_notifier_wait_for_events
+ triggered_notifier_wait_for_events = True
+ return True
+
+ notifier = self.hs.get_notifier()
+
+ # Listen for some new activity for the user. We're just trying to confirm that
+ # our bump below actually does what we think it does (triggers new activity for
+ # the user).
+ result_awaitable = notifier.wait_for_events(
+ user_id,
+ 1000,
+ _on_new_acivity,
+ from_token=from_token,
+ )
+
+ # Update the account data or presence so that `notifier.wait_for_events(...)`
+ # wakes up. We chose these two options because they're least likely to show up
+ # in the Sliding Sync response so they won't affect whether we have results.
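+ # A typical call from a test body looks like, for example:
+ #
+ #     self._bump_notifier_wait_for_events(
+ #         user1_id, wake_stream_key=StreamKeyType.PRESENCE
+ #     )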
+ if wake_stream_key == StreamKeyType.ACCOUNT_DATA: + self.get_success( + self.hs.get_account_data_handler().add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + elif wake_stream_key == StreamKeyType.PRESENCE: + sending_user_id = self.register_user( + "user_bump_notifier_wait_for_events_" + random_string(10), "pass" + ) + sending_user_tok = self.login(sending_user_id, "pass") + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user_id: {"d1": test_msg}}}, + access_token=sending_user_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + else: + raise AssertionError( + "Unable to wake that stream in _bump_notifier_wait_for_events(...)" + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + class SlidingSyncTestCase(SlidingSyncBase): """ @@ -1282,18 +1371,10 @@ class SlidingSyncTestCase(SlidingSyncBase): devices.register_servlets, ] - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() - self.account_data_handler = hs.get_account_data_handler() - self.notifier = hs.get_notifier() def _assertRequiredStateIncludes( self, @@ -1419,52 +1500,6 @@ class SlidingSyncTestCase(SlidingSyncBase): return room_id - def _bump_notifier_wait_for_events(self, user_id: str) -> None: - """ - Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding - Sync results. - """ - # We're expecting some new activity from this point onwards - from_token = self.event_sources.get_current_token() - - triggered_notifier_wait_for_events = False - - async def _on_new_acivity( - before_token: StreamToken, after_token: StreamToken - ) -> bool: - nonlocal triggered_notifier_wait_for_events - triggered_notifier_wait_for_events = True - return True - - # Listen for some new activity for the user. We're just trying to confirm that - # our bump below actually does what we think it does (triggers new activity for - # the user). - result_awaitable = self.notifier.wait_for_events( - user_id, - 1000, - _on_new_acivity, - from_token=from_token, - ) - - # Update the account data so that `notifier.wait_for_events(...)` wakes up. - # We're bumping account data because it won't show up in the Sliding Sync - # response so it won't affect whether we have results. - self.get_success( - self.account_data_handler.add_account_data_for_user( - user_id, - "org.matrix.foobarbaz", - {"foo": "bar"}, - ) - ) - - # Wait for our notifier result - self.get_success(result_awaitable) - - if not triggered_notifier_wait_for_events: - raise AssertionError( - "Expected `notifier.wait_for_events(...)` to be triggered" - ) - def test_sync_list(self) -> None: """ Test that room IDs show up in the Sliding Sync `lists` @@ -1671,7 +1706,9 @@ class SlidingSyncTestCase(SlidingSyncBase): channel.await_result(timeout_ms=5000) # Wake-up `notifier.wait_for_events(...)` that will cause us test # `SlidingSyncResult.__bool__` for new results. 
- self._bump_notifier_wait_for_events(user1_id) + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) # Block for a little bit more to ensure we don't see any new results. with self.assertRaises(TimedOutException): channel.await_result(timeout_ms=4000) @@ -4636,67 +4673,12 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): sendtodevice.register_servlets, ] - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.event_sources = hs.get_event_sources() - self.account_data_handler = hs.get_account_data_handler() - self.notifier = hs.get_notifier() self.sync_endpoint = ( "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" ) - def _bump_notifier_wait_for_events(self, user_id: str) -> None: - """ - Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding - Sync results. - """ - # We're expecting some new activity from this point onwards - from_token = self.event_sources.get_current_token() - - triggered_notifier_wait_for_events = False - - async def _on_new_acivity( - before_token: StreamToken, after_token: StreamToken - ) -> bool: - nonlocal triggered_notifier_wait_for_events - triggered_notifier_wait_for_events = True - return True - - # Listen for some new activity for the user. We're just trying to confirm that - # our bump below actually does what we think it does (triggers new activity for - # the user). - result_awaitable = self.notifier.wait_for_events( - user_id, - 1000, - _on_new_acivity, - from_token=from_token, - ) - - # Update the account data so that `notifier.wait_for_events(...)` wakes up. - # We're bumping account data because it won't show up in the Sliding Sync - # response so it won't affect whether we have results. - self.get_success( - self.account_data_handler.add_account_data_for_user( - user_id, - "org.matrix.foobarbaz", - {"foo": "bar"}, - ) - ) - - # Wait for our notifier result - self.get_success(result_awaitable) - - if not triggered_notifier_wait_for_events: - raise AssertionError( - "Expected `notifier.wait_for_events(...)` to be triggered" - ) - def _assert_to_device_response( self, channel: FakeChannel, expected_messages: List[JsonDict] ) -> str: @@ -4945,7 +4927,9 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): channel.await_result(timeout_ms=5000) # Wake-up `notifier.wait_for_events(...)` that will cause us test # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events(user1_id) + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) # Block for a little bit more to ensure we don't see any new results. 
with self.assertRaises(TimedOutException): channel.await_result(timeout_ms=4000) @@ -4968,68 +4952,13 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): devices.register_servlets, ] - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.event_sources = hs.get_event_sources() self.e2e_keys_handler = hs.get_e2e_keys_handler() - self.account_data_handler = hs.get_account_data_handler() - self.notifier = hs.get_notifier() self.sync_endpoint = ( "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" ) - def _bump_notifier_wait_for_events(self, user_id: str) -> None: - """ - Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding - Sync results. - """ - # We're expecting some new activity from this point onwards - from_token = self.event_sources.get_current_token() - - triggered_notifier_wait_for_events = False - - async def _on_new_acivity( - before_token: StreamToken, after_token: StreamToken - ) -> bool: - nonlocal triggered_notifier_wait_for_events - triggered_notifier_wait_for_events = True - return True - - # Listen for some new activity for the user. We're just trying to confirm that - # our bump below actually does what we think it does (triggers new activity for - # the user). - result_awaitable = self.notifier.wait_for_events( - user_id, - 1000, - _on_new_acivity, - from_token=from_token, - ) - - # Update the account data so that `notifier.wait_for_events(...)` wakes up. - # We're bumping account data because it won't show up in the Sliding Sync - # response so it won't affect whether we have results. - self.get_success( - self.account_data_handler.add_account_data_for_user( - user_id, - "org.matrix.foobarbaz", - {"foo": "bar"}, - ) - ) - - # Wait for our notifier result - self.get_success(result_awaitable) - - if not triggered_notifier_wait_for_events: - raise AssertionError( - "Expected `notifier.wait_for_events(...)` to be triggered" - ) - def test_no_data_initial_sync(self) -> None: """ Test that enabling e2ee extension works during an intitial sync, even if there @@ -5231,7 +5160,9 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): channel.await_result(timeout_ms=5000) # Wake-up `notifier.wait_for_events(...)` that will cause us test # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events(user1_id) + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) # Block for a little bit more to ensure we don't see any new results. 
with self.assertRaises(TimedOutException): channel.await_result(timeout_ms=4000) @@ -5471,73 +5402,14 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): sendtodevice.register_servlets, ] - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.event_sources = hs.get_event_sources() self.e2e_keys_handler = hs.get_e2e_keys_handler() self.account_data_handler = hs.get_account_data_handler() - self.notifier = hs.get_notifier() self.sync_endpoint = ( "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" ) - def _bump_notifier_wait_for_events(self, user_id: str) -> None: - """ - Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding - Sync results. - """ - # We're expecting some new activity from this point onwards - from_token = self.event_sources.get_current_token() - - triggered_notifier_wait_for_events = False - - async def _on_new_acivity( - before_token: StreamToken, after_token: StreamToken - ) -> bool: - nonlocal triggered_notifier_wait_for_events - triggered_notifier_wait_for_events = True - return True - - # Listen for some new activity for the user. We're just trying to confirm that - # our bump below actually does what we think it does (triggers new activity for - # the user). - result_awaitable = self.notifier.wait_for_events( - user_id, - 1000, - _on_new_acivity, - from_token=from_token, - ) - - # Send a new To-Device message so that `notifier.wait_for_events(...)` wakes up. - # We're bumping to-device because it won't show up in the Sliding Sync response - # for this extension so it won't affect whether we have results. - sending_user_id = self.register_user( - "user_bump_notifier_wait_for_events", "pass" - ) - sending_user_tok = self.login(sending_user_id, "pass") - test_msg = {"foo": "bar"} - chan = self.make_request( - "PUT", - "/_matrix/client/r0/sendToDevice/m.test/1234", - content={"messages": {user_id: {"d1": test_msg}}}, - access_token=sending_user_tok, - ) - self.assertEqual(chan.code, 200, chan.result) - - # Wait for our notifier result - self.get_success(result_awaitable) - - if not triggered_notifier_wait_for_events: - raise AssertionError( - "Expected `notifier.wait_for_events(...)` to be triggered" - ) - def test_no_data_initial_sync(self) -> None: """ Test that enabling the account_data extension works during an intitial sync, @@ -6229,7 +6101,13 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): channel.await_result(timeout_ms=5000) # Wake-up `notifier.wait_for_events(...)` that will cause us test # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events(user1_id) + self._bump_notifier_wait_for_events( + user1_id, + # We choose `StreamKeyType.PRESENCE` because we're testing for account data + # and don't want to contaminate the account data results using + # `StreamKeyType.ACCOUNT_DATA`. + wake_stream_key=StreamKeyType.PRESENCE, + ) # Block for a little bit more to ensure we don't see any new results. with self.assertRaises(TimedOutException): channel.await_result(timeout_ms=4000) From 568051c0f07393b786b9d813a1db53dd332c9fc2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 25 Jul 2024 11:01:47 -0500 Subject: [PATCH 072/332] Refactor Sliding Sync tests to better utilize the `SlidingSyncBase.do_sync(...)` (pt. 
2) (#17482) `SlidingSyncBase.do_sync()` for tests was first introduced in https://github.com/element-hq/synapse/pull/17452 Part 1: https://github.com/element-hq/synapse/pull/17481 --- changelog.d/17482.misc | 1 + tests/rest/client/test_sync.py | 1746 +++++++++++++------------------- 2 files changed, 726 insertions(+), 1021 deletions(-) create mode 100644 changelog.d/17482.misc diff --git a/changelog.d/17482.misc b/changelog.d/17482.misc new file mode 100644 index 0000000000..ac55538424 --- /dev/null +++ b/changelog.d/17482.misc @@ -0,0 +1 @@ +Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index a9f2b274aa..2bbbd95a76 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -66,7 +66,7 @@ from tests import unittest from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) -from tests.server import FakeChannel, TimedOutException +from tests.server import TimedOutException from tests.test_utils.event_injection import create_event, mark_event_as_partial_state from tests.unittest import skip_unless @@ -1504,44 +1504,37 @@ class SlidingSyncTestCase(SlidingSyncBase): """ Test that room IDs show up in the Sliding Sync `lists` """ - alice_user_id = self.register_user("alice", "correcthorse") - alice_access_token = self.login(alice_user_id, "correcthorse") + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") - room_id = self.helper.create_room_as( - alice_user_id, tok=alice_access_token, is_public=True - ) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [ + ["m.room.join_rules", ""], + ["m.room.history_visibility", ""], + ["m.space.child", "*"], + ], + "timeline_limit": 1, } - }, - access_token=alice_access_token, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the list includes the room we are joined to self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -1549,15 +1542,15 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) def test_wait_for_sync_token(self) -> None: """ Test that worker will wait until it catches up to the given token """ - alice_user_id = self.register_user("alice", "correcthorse") - alice_access_token = self.login(alice_user_id, "correcthorse") + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") # Create a future token that will cause us to wait. 
Since we never send a new # event to reach that future stream_ordering, the worker will wait until the @@ -1575,23 +1568,24 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [ + ["m.room.join_rules", ""], + ["m.room.history_visibility", ""], + ["m.space.child", "*"], + ], + "timeline_limit": 1, + } + } + } channel = self.make_request( "POST", self.sync_endpoint + f"?pos={future_position_token_serialized}", - { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } - } - }, - access_token=alice_access_token, + content=sync_body, + access_token=user1_tok, await_result=False, ) # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)` @@ -1681,23 +1675,22 @@ class SlidingSyncTestCase(SlidingSyncBase): room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id, user1_id, tok=user1_tok) - from_token = self.event_sources.get_current_token() + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Make the Sliding Sync request channel = self.make_request( "POST", - self.sync_endpoint - + "?timeout=10000" - + f"&pos={self.get_success(from_token.to_string(self.store))}", - { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": 1, - } - } - }, + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, access_token=user1_tok, await_result=False, ) @@ -1758,55 +1751,50 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - # Absense of filters does not imply "False" values - "all": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {}, - }, - # Test single truthy filter - "dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": True}, - }, - # Test single falsy filter - "non-dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False}, - }, - # Test how multiple filters should stack (AND'd together) - "room-invites": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False, "is_invite": True}, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + sync_body = { + "lists": { + # Absense of filters does not imply "False" values + "all": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {}, + }, + # Test single truthy filter + "dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": True}, + }, + # Test single falsy filter + "non-dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False}, + }, + # Test how multiple filters should stack (AND'd together) + "room-invites": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False, "is_invite": True}, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list 
we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["all", "dms", "non-dms", "room-invites"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the lists have the correct rooms self.assertListEqual( - list(channel.json_body["lists"]["all"]["ops"]), + list(response_body["lists"]["all"]["ops"]), [ { "op": "SYNC", @@ -1819,10 +1807,10 @@ class SlidingSyncTestCase(SlidingSyncBase): ], } ], - list(channel.json_body["lists"]["all"]), + list(response_body["lists"]["all"]), ) self.assertListEqual( - list(channel.json_body["lists"]["dms"]["ops"]), + list(response_body["lists"]["dms"]["ops"]), [ { "op": "SYNC", @@ -1830,10 +1818,10 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [invited_dm_room_id, joined_dm_room_id], } ], - list(channel.json_body["lists"]["dms"]), + list(response_body["lists"]["dms"]), ) self.assertListEqual( - list(channel.json_body["lists"]["non-dms"]["ops"]), + list(response_body["lists"]["non-dms"]["ops"]), [ { "op": "SYNC", @@ -1841,10 +1829,10 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [invite_room_id, room_id], } ], - list(channel.json_body["lists"]["non-dms"]), + list(response_body["lists"]["non-dms"]), ) self.assertListEqual( - list(channel.json_body["lists"]["room-invites"]["ops"]), + list(response_body["lists"]["room-invites"]["ops"]), [ { "op": "SYNC", @@ -1852,14 +1840,14 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [invite_room_id], } ], - list(channel.json_body["lists"]["room-invites"]), + list(response_body["lists"]["room-invites"]), ) # Ensure DM's are correctly marked self.assertDictEqual( { room_id: room.get("is_dm") - for room_id, room in channel.json_body["rooms"].items() + for room_id, room in response_body["rooms"].items() }, { invite_room_id: None, @@ -1886,36 +1874,31 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id2, "activity in room2", tok=user1_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [ + ["m.room.join_rules", ""], + ["m.room.history_visibility", ""], + ["m.space.child", "*"], + ], + "timeline_limit": 1, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the list is sorted in the way we expect self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -1923,7 +1906,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id2, room_id1, room_id3], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) def test_sliced_windows(self) -> None: @@ -1939,35 +1922,26 @@ class SlidingSyncTestCase(SlidingSyncBase): room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) # Make the Sliding Sync request for a single room - channel = self.make_request( - 
"POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the list is sorted in the way we expect self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -1975,39 +1949,30 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id3], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) # Make the Sliding Sync request for the first two rooms - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 1, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the list is sorted in the way we expect self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -2015,7 +1980,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id3, room_id2], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) def test_rooms_meta_when_joined(self) -> None: @@ -2046,43 +2011,38 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the current state of the room self.assertEqual( - channel.json_body["rooms"][room_id1]["name"], + response_body["rooms"][room_id1]["name"], "my super room", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["avatar"], + response_body["rooms"][room_id1]["avatar"], "mxc://DUMMY_MEDIA_ID", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + 
response_body["rooms"][room_id1]["joined_count"], 2, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], 0, ) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("is_dm"), + response_body["rooms"][room_id1].get("is_dm"), ) def test_rooms_meta_when_invited(self) -> None: @@ -2129,44 +2089,39 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # This should still reflect the current state of the room even when the user is # invited. self.assertEqual( - channel.json_body["rooms"][room_id1]["name"], + response_body["rooms"][room_id1]["name"], "my super duper room", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["avatar"], + response_body["rooms"][room_id1]["avatar"], "mxc://UPDATED_DUMMY_MEDIA_ID", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + response_body["rooms"][room_id1]["joined_count"], 1, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], 1, ) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("is_dm"), + response_body["rooms"][room_id1].get("is_dm"), ) def test_rooms_meta_when_banned(self) -> None: @@ -2213,45 +2168,40 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the state of the room at the time of leaving self.assertEqual( - channel.json_body["rooms"][room_id1]["name"], + response_body["rooms"][room_id1]["name"], "my super room", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["avatar"], + response_body["rooms"][room_id1]["avatar"], "mxc://DUMMY_MEDIA_ID", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + response_body["rooms"][room_id1]["joined_count"], # FIXME: The actual number should be "1" (user2) but we currently don't # support this for rooms where the user has left/been banned. 
0, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], 0, ) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("is_dm"), + response_body["rooms"][room_id1].get("is_dm"), ) def test_rooms_meta_heroes(self) -> None: @@ -2291,61 +2241,56 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Room1 has a name so we shouldn't see any `heroes` which the client would use # the calculate the room name themselves. self.assertEqual( - channel.json_body["rooms"][room_id1]["name"], + response_body["rooms"][room_id1]["name"], "my super room", - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("heroes")) + self.assertIsNone(response_body["rooms"][room_id1].get("heroes")) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + response_body["rooms"][room_id1]["joined_count"], 2, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], 1, ) # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(channel.json_body["rooms"][room_id2].get("name")) + self.assertIsNone(response_body["rooms"][room_id2].get("name")) self.assertCountEqual( [ hero["user_id"] - for hero in channel.json_body["rooms"][room_id2].get("heroes", []) + for hero in response_body["rooms"][room_id2].get("heroes", []) ], # Heroes shouldn't include the user themselves (we shouldn't see user1) [user2_id, user3_id], ) self.assertEqual( - channel.json_body["rooms"][room_id2]["joined_count"], + response_body["rooms"][room_id2]["joined_count"], 2, ) self.assertEqual( - channel.json_body["rooms"][room_id2]["invited_count"], + response_body["rooms"][room_id2]["invited_count"], 1, ) # We didn't request any state so we shouldn't see any `required_state` - self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) - self.assertIsNone(channel.json_body["rooms"][room_id2].get("required_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(response_body["rooms"][room_id2].get("required_state")) def test_rooms_meta_heroes_max(self) -> None: """ @@ -2384,44 +2329,39 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(room_id1, user7_id, tok=user7_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) + 
self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ hero["user_id"] - for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + for hero in response_body["rooms"][room_id1].get("heroes", []) ], # Heroes should be the first 5 users in the room (excluding the user # themselves, we shouldn't see `user1`) [user2_id, user3_id, user4_id, user5_id, user6_id], ) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + response_body["rooms"][room_id1]["joined_count"], 7, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], 0, ) # We didn't request any state so we shouldn't see any `required_state` - self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) def test_rooms_meta_heroes_when_banned(self) -> None: """ @@ -2462,28 +2402,23 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(channel.json_body["rooms"][room_id1].get("name")) + self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ hero["user_id"] - for hero in channel.json_body["rooms"][room_id1].get("heroes", []) + for hero in response_body["rooms"][room_id1].get("heroes", []) ], # Heroes shouldn't include the user themselves (we shouldn't see user1). We # also shouldn't see user4 since they joined after user1 was banned. @@ -2494,13 +2429,13 @@ class SlidingSyncTestCase(SlidingSyncBase): ) self.assertEqual( - channel.json_body["rooms"][room_id1]["joined_count"], + response_body["rooms"][room_id1]["joined_count"], # FIXME: The actual number should be "1" (user2) but we currently don't # support this for rooms where the user has left/been banned. 0, ) self.assertEqual( - channel.json_body["rooms"][room_id1]["invited_count"], + response_body["rooms"][room_id1]["invited_count"], # We shouldn't see user5 since they were invited after user1 was banned. 
# # FIXME: The actual number should be "1" (user3) but we currently don't @@ -2533,46 +2468,41 @@ class SlidingSyncTestCase(SlidingSyncBase): user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room) self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], True, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # Check to make sure the latest events are returned self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ event_response4["event_id"], event_response5["event_id"], user1_join_response["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # Check to make sure the `prev_batch` points at the right place prev_batch_token = self.get_success( StreamToken.from_string( - self.store, channel.json_body["rooms"][room_id1]["prev_batch"] + self.store, response_body["rooms"][room_id1]["prev_batch"] ) ) prev_batch_room_stream_token_serialized = self.get_success( @@ -2596,9 +2526,9 @@ class SlidingSyncTestCase(SlidingSyncBase): # With no `from_token` (initial sync), it's all historical since there is no # "live" range self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_not_limited_initial_sync(self) -> None: @@ -2619,44 +2549,39 @@ class SlidingSyncTestCase(SlidingSyncBase): # Make the Sliding Sync request timeline_limit = 100 - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": timeline_limit, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # The timeline should be `limited=False` because we have all of the events (no # more to paginate to) self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], False, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) expected_number_of_events = 9 # We're just looking to make sure we got all of the events before hitting the `timeline_limit` self.assertEqual( - len(channel.json_body["rooms"][room_id1]["timeline"]), + len(response_body["rooms"][room_id1]["timeline"]), expected_number_of_events, - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) self.assertLessEqual(expected_number_of_events, timeline_limit) # With no `from_token` (initial sync), it's all historical since there is no # "live" 
token range. self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_incremental_sync(self) -> None: @@ -2674,7 +2599,7 @@ class SlidingSyncTestCase(SlidingSyncBase): # Make an initial Sliding Sync request to grab a token. This is also a sanity # check that we can go from initial to incremental sync. - sync_params = { + sync_body = { "lists": { "foo-list": { "ranges": [[0, 1]], @@ -2683,14 +2608,7 @@ class SlidingSyncTestCase(SlidingSyncBase): } } } - channel = self.make_request( - "POST", - self.sync_endpoint, - sync_params, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - next_pos = channel.json_body["pos"] + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Send some events but don't send enough to saturate the `timeline_limit`. # We want to later test that we only get the new events since the `next_pos` @@ -2698,41 +2616,35 @@ class SlidingSyncTestCase(SlidingSyncBase): event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) # Make an incremental Sliding Sync request (what we're trying to test) - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={next_pos}", - sync_params, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # We only expect to see the new events since the last sync which isn't enough to # fill up the `timeline_limit`. self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], False, - f'Our `timeline_limit` was {sync_params["lists"]["foo-list"]["timeline_limit"]} ' - + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' - + str(channel.json_body["rooms"][room_id1]), + f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} ' + + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. 
' + + str(response_body["rooms"][room_id1]), ) # Check to make sure the latest events are returned self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ event_response2["event_id"], event_response3["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # All events are "live" self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 2, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_bump_stamp(self) -> None: @@ -2777,33 +2689,27 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request - timeline_limit = 100 - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 100, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the list includes the rooms in the right order self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -2813,22 +2719,22 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id1, room_id2], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) # The `bump_stamp` for room1 should point at the latest message (not the # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) self.assertEqual( - channel.json_body["rooms"][room_id1]["bump_stamp"], + response_body["rooms"][room_id1]["bump_stamp"], event_pos1.stream, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # The `bump_stamp` for room2 should point at the latest message self.assertEqual( - channel.json_body["rooms"][room_id2]["bump_stamp"], + response_body["rooms"][room_id2]["bump_stamp"], event_pos2.stream, - channel.json_body["rooms"][room_id2], + response_body["rooms"][room_id2], ) def test_rooms_bump_stamp_backfill(self) -> None: @@ -2932,23 +2838,18 @@ class SlidingSyncTestCase(SlidingSyncBase): # Doing an SS request should return a positive `bump_stamp`, even though # the only event that matches the bump types has as negative stream # ordering. 
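(To illustrate the rule the `bump_stamp` assertions above and below rely on, here is a minimal sketch. The event-type set is assumed to approximate Synapse's `DEFAULT_BUMP_EVENT_TYPES`; the helper, its signature, and the event shape are illustrative assumptions, not the server's implementation.)

    # Sketch: choose a room's bump_stamp by scanning the timeline
    # newest-first and taking the stream ordering of the first event whose
    # type "bumps" the room (messages do, reactions do not).
    from typing import Iterable, Optional

    # Assumed to approximate Synapse's DEFAULT_BUMP_EVENT_TYPES.
    BUMP_EVENT_TYPES = {
        "m.room.create",
        "m.room.message",
        "m.room.encrypted",
        "m.sticker",
    }

    def pick_bump_stamp(timeline: Iterable[dict]) -> Optional[int]:
        # `timeline` is assumed oldest -> newest; each event is a dict with
        # "type" and "stream_ordering" keys (a simplified, assumed shape).
        for event in reversed(list(timeline)):
            if event["type"] in BUMP_EVENT_TYPES:
                return event["stream_ordering"]
        return None

    # A reaction after the last message must not move the bump_stamp:
    assert pick_bump_stamp(
        [
            {"type": "m.room.message", "stream_ordering": 10},
            {"type": "m.reaction", "stream_ordering": 11},
        ]
    ) == 10

The backfill test that follows adds one wrinkle: backfilled events get negative stream orderings, and the server is expected not to expose a raw negative value, hence the `assertGreater(..., 0)` check.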
- channel = self.make_request( - "POST", - self.sync_endpoint, - content={ - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 5, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) - self.assertGreater(channel.json_body["rooms"][room_id]["bump_stamp"], 0) + self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) def test_rooms_newly_joined_incremental_sync(self) -> None: """ @@ -2995,28 +2896,22 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make an incremental Sliding Sync request (what we're trying to test) - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # We should see the new events and the rest should be filled with historical # events which will make us `limited=True` since there are more to paginate to. self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], True, f"Our `timeline_limit` was {timeline_limit} " - + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' - + str(channel.json_body["rooms"][room_id1]), + + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' + + str(response_body["rooms"][room_id1]), ) # Check to make sure that the "live" and historical events are returned self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ event_response2["event_id"], @@ -3024,14 +2919,14 @@ class SlidingSyncTestCase(SlidingSyncBase): event_response3["event_id"], event_response4["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # Only events after the `from_token` are "live" (join, event3, event4) self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 3, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_invite_shared_history_initial_sync(self) -> None: @@ -3068,51 +2963,46 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after4", tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # `timeline` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("timeline"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], ) # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - 
channel.json_body["rooms"][room_id1].get("num_live"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("num_live"), + response_body["rooms"][room_id1], ) # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("limited"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("limited"), + response_body["rooms"][room_id1], ) # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("prev_batch"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("prev_batch"), + response_body["rooms"][room_id1], ) # `required_state` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("required_state"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], ) # We should have some `stripped_state` so the potential joiner can identify the # room (we don't care about the order). self.assertCountEqual( - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], [ { "content": {"creator": user2_id, "room_version": "10"}, @@ -3139,7 +3029,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "type": "m.room.member", }, ], - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], ) def test_rooms_invite_shared_history_incremental_sync(self) -> None: @@ -3190,43 +3080,39 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok ) - self.assertEqual(channel.code, 200, channel.json_body) # `timeline` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("timeline"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], ) # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("num_live"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("num_live"), + response_body["rooms"][room_id1], ) # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("limited"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("limited"), + response_body["rooms"][room_id1], ) # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("prev_batch"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("prev_batch"), + response_body["rooms"][room_id1], ) # `required_state` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("required_state"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], ) # We should have some 
`stripped_state` so the potential joiner can identify the # room (we don't care about the order). self.assertCountEqual( - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], [ { "content": {"creator": user2_id, "room_version": "10"}, @@ -3253,7 +3139,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "type": "m.room.member", }, ], - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], ) def test_rooms_invite_world_readable_history_initial_sync(self) -> None: @@ -3307,52 +3193,47 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after4", tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - # Large enough to see the latest events and before the invite - "timeline_limit": 4, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # Large enough to see the latest events and before the invite + "timeline_limit": 4, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # `timeline` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("timeline"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], ) # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("num_live"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("num_live"), + response_body["rooms"][room_id1], ) # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("limited"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("limited"), + response_body["rooms"][room_id1], ) # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("prev_batch"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("prev_batch"), + response_body["rooms"][room_id1], ) # `required_state` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("required_state"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], ) # We should have some `stripped_state` so the potential joiner can identify the # room (we don't care about the order). 
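(The `invite_state` asserted here is a list of stripped events, bare `type`/`state_key`/`content`/`sender` dicts, which is just enough for a client to render the invite. A hypothetical sketch of that client-side use, with an assumed helper name and the field names from the assertions below:)

    # Sketch: derive a display name for an invited room from its stripped
    # state, roughly in the order clients tend to prefer.
    from typing import List, Optional

    def invite_display_name(invite_state: List[dict]) -> Optional[str]:
        by_type = {
            (ev["type"], ev.get("state_key", "")): ev["content"]
            for ev in invite_state
        }
        name = by_type.get(("m.room.name", ""), {}).get("name")
        if name:
            return name
        alias = by_type.get(("m.room.canonical_alias", ""), {}).get("alias")
        if alias:
            return alias
        # Fall back to the inviter's membership event, if present.
        for ev in invite_state:
            if ev["type"] == "m.room.member":
                return ev["sender"]
        return None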
self.assertCountEqual( - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], [ { "content": {"creator": user2_id, "room_version": "10"}, @@ -3379,7 +3260,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "type": "m.room.member", }, ], - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], ) def test_rooms_invite_world_readable_history_incremental_sync(self) -> None: @@ -3447,44 +3328,38 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after token5", tok=user2_tok) self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + # Make the incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # `timeline` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("timeline"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], ) # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("num_live"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("num_live"), + response_body["rooms"][room_id1], ) # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("limited"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("limited"), + response_body["rooms"][room_id1], ) # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) self.assertIsNone( - channel.json_body["rooms"][room_id1].get("prev_batch"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("prev_batch"), + response_body["rooms"][room_id1], ) # `required_state` is omitted for `invite` rooms with `stripped_state` self.assertIsNone( - channel.json_body["rooms"][room_id1].get("required_state"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], ) # We should have some `stripped_state` so the potential joiner can identify the # room (we don't care about the order). 
self.assertCountEqual( - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], [ { "content": {"creator": user2_id, "room_version": "10"}, @@ -3511,7 +3386,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "type": "m.room.member", }, ], - channel.json_body["rooms"][room_id1]["invite_state"], + response_body["rooms"][room_id1]["invite_state"], ) def test_rooms_ban_initial_sync(self) -> None: @@ -3539,47 +3414,42 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after6", tok=user2_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We should see events before the ban but not after self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ event_response3["event_id"], event_response4["event_id"], user1_ban_response["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # No "live" events in an initial sync (no `from_token` to define the "live" # range) self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # There are more events to paginate to self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], True, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_ban_incremental_sync1(self) -> None: @@ -3619,39 +3489,33 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after5", tok=user2_tok) self.helper.send(room_id1, "activity after6", tok=user2_tok) - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + # Make the incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # We should see events before the ban but not after self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ event_response3["event_id"], event_response4["event_id"], user1_ban_response["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # All live events in the incremental sync self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 3, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # There aren't anymore events to paginate to in this range self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], False, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_ban_incremental_sync2(self) -> None: @@ 
-3687,28 +3551,22 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity after4", tok=user2_tok) - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + # Make the incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Nothing to see for this banned user in the room in the token range - self.assertIsNone(channel.json_body["rooms"][room_id1].get("timeline")) + self.assertIsNone(response_body["rooms"][room_id1].get("timeline")) # No events returned in the timeline so nothing is "live" self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # There aren't anymore events to paginate to in this range self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], False, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_rooms_no_required_state(self) -> None: @@ -3724,27 +3582,22 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - # Empty `required_state` - "required_state": [], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + # Empty `required_state` + "required_state": [], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # No `required_state` in response self.assertIsNone( - channel.json_body["rooms"][room_id1].get("required_state"), - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], ) def test_rooms_required_state_initial_sync(self) -> None: @@ -3761,40 +3614,35 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Tombstone, ""], - ], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.RoomHistoryVisibility, "")], }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + 
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_incremental_sync(self) -> None: """ @@ -3823,16 +3671,10 @@ class SlidingSyncTestCase(SlidingSyncBase): } } } - _, after_room_token = self.do_sync(sync_body, tok=user1_tok) + _, from_token = self.do_sync(sync_body, tok=user1_tok) - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={after_room_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + # Make the incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) @@ -3842,14 +3684,14 @@ class SlidingSyncTestCase(SlidingSyncBase): # future, we will only return updates but only if we've sent the room down the # connection before. self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.RoomHistoryVisibility, "")], }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard(self) -> None: """ @@ -3879,35 +3721,30 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with wildcards for the `event_type` and `state_key` - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [StateValues.WILDCARD, StateValues.WILDCARD], - ], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [StateValues.WILDCARD, StateValues.WILDCARD], + ], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], # We should see all the state events in the room state_map.values(), exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_event_type(self) -> None: """ @@ -3938,23 +3775,18 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with wildcards for the `event_type` - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [StateValues.WILDCARD, user2_id], - ], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [StateValues.WILDCARD, user2_id], + ], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) @@ -3962,7 +3794,7 @@ class SlidingSyncTestCase(SlidingSyncBase): # We expect at-least any state event with the `user2_id` as the `state_key` 
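(A rough model of the `required_state` matching these tests exercise: `*` acts as a wildcard on either side of a `(event_type, state_key)` pair, plus the special state keys `$ME` and `$LAZY`. The function below is an assumed simplification for illustration, not Synapse's matcher:)

    # Sketch: does a state event match any (event_type, state_key) entry?
    WILDCARD = "*"
    ME = "$ME"
    LAZY = "$LAZY"

    def matches_required_state(
        required_state: list,   # e.g. [["m.room.member", "$LAZY"], ["*", "*"]]
        event_type: str,
        state_key: str,
        requester_user_id: str,
        timeline_senders: set,  # senders of events in the returned timeline
    ) -> bool:
        for req_type, req_key in required_state:
            if req_type != WILDCARD and req_type != event_type:
                continue
            if req_key == WILDCARD:
                return True
            if req_key == ME:
                if state_key == requester_user_id:
                    return True
            elif req_key == LAZY:
                # Lazy-loading: only membership events for timeline senders.
                if event_type == "m.room.member" and state_key in timeline_senders:
                    return True
            elif req_key == state_key:
                return True
        return False

    # e.g. ["*", "@user2:test"] matches any state event keyed on user2:
    assert matches_required_state(
        [["*", "@user2:test"]], "org.matrix.foo_state",
        "@user2:test", "@user1:test", set(),
    )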
self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Member, user2_id)], state_map[("org.matrix.foo_state", user2_id)], @@ -3971,7 +3803,7 @@ class SlidingSyncTestCase(SlidingSyncBase): # events when the `event_type` is a wildcard. exact=False, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard_state_key(self) -> None: """ @@ -3987,37 +3819,32 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request with wildcards for the `state_key` - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Member, StateValues.WILDCARD], - ], - "timeline_limit": 0, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Member, StateValues.WILDCARD], + ], + "timeline_limit": 0, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Member, user1_id)], state_map[(EventTypes.Member, user2_id)], }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_lazy_loading_room_members(self) -> None: """ @@ -4040,24 +3867,19 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "3", tok=user2_tok) # Make the Sliding Sync request with lazy loading for the room members - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, StateValues.LAZY], - ], - "timeline_limit": 3, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) @@ -4065,7 +3887,7 @@ class SlidingSyncTestCase(SlidingSyncBase): # Only user2 and user3 sent events in the 3 events we see in the `timeline` self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.Member, user2_id)], @@ -4073,7 +3895,7 @@ class SlidingSyncTestCase(SlidingSyncBase): }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_me(self) -> None: """ @@ -4113,25 +3935,20 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with a request for '$ME'. 
- channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, StateValues.ME], - ["org.matrix.foo", StateValues.ME], - ], - "timeline_limit": 3, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.ME], + ["org.matrix.foo", StateValues.ME], + ], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) @@ -4139,7 +3956,7 @@ class SlidingSyncTestCase(SlidingSyncBase): # Only user2 and user3 sent events in the 3 events we see in the `timeline` self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.Member, user1_id)], @@ -4147,7 +3964,7 @@ class SlidingSyncTestCase(SlidingSyncBase): }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: @@ -4210,17 +4027,11 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.leave(room_id1, user3_id, tok=user3_tok) # Make the Sliding Sync request with lazy loading for the room members - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Only user2 and user3 sent events in the 3 events we see in the `timeline` self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.Member, user1_id)], @@ -4230,7 +4041,7 @@ class SlidingSyncTestCase(SlidingSyncBase): }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_combine_superset(self) -> None: """ @@ -4260,45 +4071,40 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with wildcards for the `state_key` - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, user1_id], - ], - "timeline_limit": 0, - }, - "bar-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Member, StateValues.WILDCARD], - ["org.matrix.foo_state", ""], - ], - "timeline_limit": 0, - }, + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, user1_id], + ], + "timeline_limit": 0, }, - "room_subscriptions": { - room_id1: { - "required_state": [["org.matrix.bar_state", ""]], - "timeline_limit": 0, - } + "bar-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Member, StateValues.WILDCARD], + ["org.matrix.foo_state", ""], + ], + 
"timeline_limit": 0, }, }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + "room_subscriptions": { + room_id1: { + "required_state": [["org.matrix.bar_state", ""]], + "timeline_limit": 0, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], state_map[(EventTypes.Member, user1_id)], @@ -4308,7 +4114,7 @@ class SlidingSyncTestCase(SlidingSyncBase): }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_partial_state(self) -> None: """ @@ -4331,28 +4137,23 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request (NOT lazy-loading room members) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 0, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure the list includes room1 but room2 is excluded because it's still # partially-stated self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -4360,33 +4161,28 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id1], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) # Make the Sliding Sync request (with lazy-loading room members) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - # Lazy-load room members - [EventTypes.Member, StateValues.LAZY], - ], - "timeline_limit": 0, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + # Lazy-load room members + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # The list should include both rooms now because we're lazy-loading room members self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -4394,7 +4190,7 @@ class SlidingSyncTestCase(SlidingSyncBase): "room_ids": [room_id2, room_id1], } ], - channel.json_body["lists"]["foo-list"], + response_body["lists"]["foo-list"], ) def test_room_subscriptions_with_join_membership(self) -> None: @@ -4411,22 +4207,17 @@ class SlidingSyncTestCase(SlidingSyncBase): join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) # Make the Sliding Sync request with just the room subscription - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - 
"timeline_limit": 1, - } - }, + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) @@ -4434,37 +4225,37 @@ class SlidingSyncTestCase(SlidingSyncBase): # We should see some state self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[(EventTypes.Create, "")], }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) # We should see some events self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ join_response["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # No "live" events in an initial sync (no `from_token` to define the "live" # range) self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # There are more events to paginate to self.assertEqual( - channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], True, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_room_subscriptions_with_leave_membership(self) -> None: @@ -4505,57 +4296,52 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with just the room subscription - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "room_subscriptions": { - room_id1: { - "required_state": [ - ["org.matrix.foo_state", ""], - ], - "timeline_limit": 2, - } - }, + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 2, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We should see the state at the time of the leave self._assertRequiredStateIncludes( - channel.json_body["rooms"][room_id1]["required_state"], + response_body["rooms"][room_id1]["required_state"], { state_map[("org.matrix.foo_state", "")], }, exact=True, ) - self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) # We should see some before we left (nothing after) self.assertEqual( [ event["event_id"] - for event in channel.json_body["rooms"][room_id1]["timeline"] + for event in response_body["rooms"][room_id1]["timeline"] ], [ join_response["event_id"], leave_response["event_id"], ], - channel.json_body["rooms"][room_id1]["timeline"], + response_body["rooms"][room_id1]["timeline"], ) # No "live" events in an initial sync (no `from_token` to define the "live" # range) self.assertEqual( - channel.json_body["rooms"][room_id1]["num_live"], + response_body["rooms"][room_id1]["num_live"], 0, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) # There are more events to paginate to self.assertEqual( - 
channel.json_body["rooms"][room_id1]["limited"], + response_body["rooms"][room_id1]["limited"], True, - channel.json_body["rooms"][room_id1], + response_body["rooms"][room_id1], ) def test_room_subscriptions_no_leak_private_room(self) -> None: @@ -4576,27 +4362,20 @@ class SlidingSyncTestCase(SlidingSyncBase): ) # Make the Sliding Sync request with just the room subscription - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 1, - } - }, + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We should not see the room at all (we're not in it) - self.assertIsNone( - channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] - ) + self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) def test_room_subscriptions_world_readable(self) -> None: """ @@ -4639,28 +4418,21 @@ class SlidingSyncTestCase(SlidingSyncBase): # Note: We never join the room # Make the Sliding Sync request with just the room subscription - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 1, - } - }, + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # FIXME: In the future, we should be able to see the room because it's # `world_readable` but currently we don't support this. - self.assertIsNone( - channel.json_body["rooms"].get(room_id1), channel.json_body["rooms"] - ) + self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): @@ -4675,20 +4447,16 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.sync_endpoint = ( - "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" - ) def _assert_to_device_response( - self, channel: FakeChannel, expected_messages: List[JsonDict] + self, response_body: JsonDict, expected_messages: List[JsonDict] ) -> str: """Assert the sliding sync response was successful and has the expected to-device messages. Returns the next_batch token from the to-device section. 
""" - self.assertEqual(channel.code, 200, channel.json_body) - extensions = channel.json_body["extensions"] + extensions = response_body["extensions"] to_device = extensions["to_device"] self.assertIsInstance(to_device["next_batch"], str) self.assertEqual(to_device["events"], expected_messages) @@ -4702,22 +4470,18 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } }, - access_token=user1_tok, - ) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We expect no to-device messages - self._assert_to_device_response(channel, []) + self._assert_to_device_response(response_body, []) def test_data_initial_sync(self) -> None: """Test that we get to-device messages when we don't specify a since @@ -4738,21 +4502,17 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): ) self.assertEqual(chan.code, 200, chan.result) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } }, - access_token=user1_tok, - ) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) self._assert_to_device_response( - channel, + response_body, [{"content": test_msg, "sender": user2_id, "type": "m.test"}], ) @@ -4764,21 +4524,17 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): user2_id = self.register_user("u2", "pass") user2_tok = self.login(user2_id, "pass", "d2") - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, + sync_body: JsonDict = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } }, - access_token=user1_tok, - ) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # No to-device messages yet. - next_batch = self._assert_to_device_response(channel, []) + next_batch = self._assert_to_device_response(response_body, []) test_msg = {"foo": "bar"} chan = self.make_request( @@ -4789,59 +4545,47 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): ) self.assertEqual(chan.code, 200, chan.result) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - "since": next_batch, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } }, - access_token=user1_tok, - ) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) next_batch = self._assert_to_device_response( - channel, + response_body, [{"content": test_msg, "sender": user2_id, "type": "m.test"}], ) # The next sliding sync request should not include the to-device # message. 
- channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - "since": next_batch, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } }, - access_token=user1_tok, - ) - self._assert_to_device_response(channel, []) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self._assert_to_device_response(response_body, []) # An initial sliding sync request should not include the to-device # message, as it should have been deleted - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } }, - access_token=user1_tok, - ) - self._assert_to_device_response(channel, []) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self._assert_to_device_response(response_body, []) def test_wait_for_new_data(self) -> None: """ @@ -4891,7 +4635,7 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): self.assertEqual(channel.code, 200, channel.json_body) self._assert_to_device_response( - channel, + channel.json_body, [{"content": test_msg, "sender": user2_id, "type": "m.test"}], ) @@ -4938,7 +4682,7 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): channel.await_result(timeout_ms=1200) self.assertEqual(channel.code, 200, channel.json_body) - self._assert_to_device_response(channel, []) + self._assert_to_device_response(channel.json_body, []) class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): @@ -4955,9 +4699,6 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.e2e_keys_handler = hs.get_e2e_keys_handler() - self.sync_endpoint = ( - "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" - ) def test_no_data_initial_sync(self) -> None: """ @@ -4968,27 +4709,22 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): user1_tok = self.login(user1_id, "pass") # Make an initial Sliding Sync request with the e2ee extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Device list updates are only present for incremental syncs - self.assertIsNone(channel.json_body["extensions"]["e2ee"].get("device_lists")) + self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists")) # Both of these should be present even when empty self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + response_body["extensions"]["e2ee"]["device_one_time_keys_count"], { # This is always present because of # https://github.com/element-hq/element-android/issues/3725 and @@ -4997,7 +4733,7 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): }, ) self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], [], ) @@ -5020,29 +4756,21 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): _, from_token = self.do_sync(sync_body, 
tok=user1_tok) # Make an incremental Sliding Sync request with the e2ee extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Device list shows up for incremental syncs self.assertEqual( - channel.json_body["extensions"]["e2ee"] - .get("device_lists", {}) - .get("changed"), + response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), [], ) self.assertEqual( - channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), [], ) # Both of these should be present even when empty self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + response_body["extensions"]["e2ee"]["device_one_time_keys_count"], { # Note that "signed_curve25519" is always returned in key count responses # regardless of whether we uploaded any keys for it. This is necessary until @@ -5055,7 +4783,7 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): }, ) self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], [], ) @@ -5248,23 +4976,15 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): self.helper.leave(room_id, user4_id, tok=user4_tok) # Make an incremental Sliding Sync request with the e2ee extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - content=sync_body, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Device list updates show up self.assertEqual( - channel.json_body["extensions"]["e2ee"] - .get("device_lists", {}) - .get("changed"), + response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), [user3_id], ) self.assertEqual( - channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), [user4_id], ) @@ -5306,24 +5026,19 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): ) # Make a Sliding Sync request with the e2ee extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Check for those one time key counts self.assertEqual( - channel.json_body["extensions"]["e2ee"].get("device_one_time_keys_count"), + response_body["extensions"]["e2ee"].get("device_one_time_keys_count"), { "alg1": 1, "alg2": 2, @@ -5367,26 +5082,19 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): self.assertEqual(fallback_res, ["alg1"], fallback_res) # Make a Sliding Sync request with the e2ee extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, 
channel.json_body)
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
 
         # Check for the unused fallback key types
         self.assertListEqual(
-            channel.json_body["extensions"]["e2ee"].get(
-                "device_unused_fallback_key_types"
-            ),
+            response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"),
             ["alg1"],
         )
 
@@ -5404,11 +5112,7 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
 
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
-        self.e2e_keys_handler = hs.get_e2e_keys_handler()
         self.account_data_handler = hs.get_account_data_handler()
-        self.sync_endpoint = (
-            "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
-        )
 
     def test_no_data_initial_sync(self) -> None:
         """
 
From be4a16ff445c9dfba04aeaed695afb3a56e204f7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 29 Jul 2024 22:45:48 +0100
Subject: [PATCH 073/332] Sliding Sync: Track whether we have sent rooms down to clients (#17447)

The basic idea is that we introduce a new token for a sliding sync
connection, which stores the mapping of room to room "status" (i.e.
have we sent the room down?). This token allows us to handle duplicate
requests properly. In future it can be used to store more
"per-connection" information safely.

In future this should be migrated into the DB, so it's important that
we try to reduce the number of syncs where we need to update the
per-connection information. In this PoC this only happens when we:
a) send down a set of rooms for the first time, or
b) we have previously sent down a room and there are updates but we are
not sending the room down the sync (due to not falling in a list range)

Co-authored-by: Eric Eastwood
---
 changelog.d/17447.feature                  |   1 +
 pyproject.toml                             |   4 +-
 synapse/handlers/sliding_sync.py           | 350 ++++++++++++--
 synapse/rest/client/sync.py                |   6 +-
 synapse/server.py                          |   1 +
 .../storage/databases/main/state_deltas.py |  37 ++
 synapse/types/handlers/__init__.py         |   3 +-
 synapse/types/rest/client/__init__.py      |   5 +
 tests/rest/client/test_sync.py             | 452 +++++++++++++++++-
 9 files changed, 814 insertions(+), 45 deletions(-)
 create mode 100644 changelog.d/17447.feature

diff --git a/changelog.d/17447.feature b/changelog.d/17447.feature
new file mode 100644
index 0000000000..6f80e298ae
--- /dev/null
+++ b/changelog.d/17447.feature
@@ -0,0 +1 @@
+Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

diff --git a/pyproject.toml b/pyproject.toml
index 0b5dc418e4..1adf8e087f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -201,8 +201,8 @@ netaddr = ">=0.7.18"
 # add a lower bound to the Jinja2 dependency.
 Jinja2 = ">=3.0"
 bleach = ">=1.4.3"
-# We use `Self`, which were added in `typing-extensions` 4.0.
-typing-extensions = ">=4.0"
+# We use `assert_never`, which was added in `typing-extensions` 4.1.
+typing-extensions = ">=4.1"
 # We enforce that we have a `cryptography` version that bundles an `openssl`
 # with the latest security patches.
cryptography = ">=3.4.7" diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 3231574402..2b74f1c9c9 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -18,6 +18,7 @@ # # import logging +from enum import Enum from itertools import chain from typing import ( TYPE_CHECKING, @@ -34,6 +35,7 @@ from typing import ( import attr from immutabledict import immutabledict +from typing_extensions import assert_never from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membership from synapse.events import EventBase @@ -52,6 +54,7 @@ from synapse.types import ( RoomStreamToken, SlidingSyncStreamToken, StateMap, + StrCollection, StreamKeyType, StreamToken, UserID, @@ -361,6 +364,8 @@ class SlidingSyncHandler: self.push_rules_handler = hs.get_push_rules_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync + self.connection_store = SlidingSyncConnectionStore() + async def wait_for_sync_for_user( self, requester: Requester, @@ -464,6 +469,11 @@ class SlidingSyncHandler: # See https://github.com/matrix-org/matrix-doc/issues/1144 raise NotImplementedError() + await self.connection_store.mark_token_seen( + sync_config=sync_config, + from_token=from_token, + ) + # Get all of the room IDs that the user should be able to see in the sync # response has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 @@ -613,7 +623,7 @@ class SlidingSyncHandler: @tag_args async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( - user=sync_config.user, + sync_config=sync_config, room_id=room_id, room_sync_config=relevant_room_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ @@ -635,11 +645,22 @@ class SlidingSyncHandler: to_token=to_token, ) - # TODO: Update this when we implement per-connection state - connection_token = 0 + if has_lists or has_room_subscriptions: + connection_position = await self.connection_store.record_rooms( + sync_config=sync_config, + from_token=from_token, + sent_room_ids=relevant_room_map.keys(), + # TODO: We need to calculate which rooms have had updates since the `from_token` but were not included in the `sent_room_ids` + unsent_room_ids=[], + ) + elif from_token: + connection_position = from_token.connection_position + else: + # Initial sync without a `from_token` starts at `0` + connection_position = 0 return SlidingSyncResult( - next_pos=SlidingSyncStreamToken(to_token, connection_token), + next_pos=SlidingSyncStreamToken(to_token, connection_position), lists=lists, rooms=rooms, extensions=extensions, @@ -1370,7 +1391,7 @@ class SlidingSyncHandler: async def get_room_sync_data( self, - user: UserID, + sync_config: SlidingSyncConfig, room_id: str, room_sync_config: RoomSyncConfig, room_membership_for_user_at_to_token: _RoomMembershipForUser, @@ -1392,6 +1413,37 @@ class SlidingSyncHandler: from_token: The point in the stream to sync from. to_token: The point in the stream to sync up to. """ + user = sync_config.user + + # Determine whether we should limit the timeline to the token range. 
+ # + # We should return historical messages (before token range) in the + # following cases because we want clients to be able to show a basic + # screen of information: + # - Initial sync (because no `from_token` to limit us anyway) + # - When users `newly_joined` + # - For an incremental sync where we haven't sent it down this + # connection before + from_bound = None + initial = True + if from_token and not room_membership_for_user_at_to_token.newly_joined: + room_status = await self.connection_store.have_sent_room( + sync_config=sync_config, + connection_token=from_token.connection_position, + room_id=room_id, + ) + if room_status.status == HaveSentRoomFlag.LIVE: + from_bound = from_token.stream_token.room_key + initial = False + elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: + assert room_status.last_token is not None + from_bound = room_status.last_token + initial = False + elif room_status.status == HaveSentRoomFlag.NEVER: + from_bound = None + initial = True + else: + assert_never(room_status.status) # Assemble the list of timeline events # @@ -1418,36 +1470,23 @@ class SlidingSyncHandler: prev_batch_token = to_token # We're going to paginate backwards from the `to_token` - from_bound = to_token.room_key + to_bound = to_token.room_key # People shouldn't see past their leave/ban event if room_membership_for_user_at_to_token.membership in ( Membership.LEAVE, Membership.BAN, ): - from_bound = ( + to_bound = ( room_membership_for_user_at_to_token.event_pos.to_room_stream_token() ) - # Determine whether we should limit the timeline to the token range. - # - # We should return historical messages (before token range) in the - # following cases because we want clients to be able to show a basic - # screen of information: - # - Initial sync (because no `from_token` to limit us anyway) - # - When users `newly_joined` - # - TODO: For an incremental sync where we haven't sent it down this - # connection before - to_bound = ( - from_token.stream_token.room_key - if from_token is not None - and not room_membership_for_user_at_to_token.newly_joined - else None - ) - timeline_events, new_room_key = await self.store.paginate_room_events( room_id=room_id, - from_key=from_bound, - to_key=to_bound, + # The bounds are reversed so we can paginate backwards + # (from newer to older events) starting at to_bound. + # This ensures we fill the `limit` with the newest events first, + from_key=to_bound, + to_key=from_bound, direction=Direction.BACKWARDS, # We add one so we can determine if there are enough events to saturate # the limit or not (see `limited`) @@ -1564,12 +1603,6 @@ class SlidingSyncHandler: # indicate to the client that a state reset happened. Perhaps we should indicate # this by setting `initial: True` and empty `required_state`. - # TODO: Since we can't determine whether we've already sent a room down this - # Sliding Sync connection before (we plan to add this optimization in the - # future), we're always returning the requested room state instead of - # updates. - initial = True - # Check whether the room has a name set name_state_ids = await self.get_current_state_ids_at( room_id=room_id, @@ -1715,9 +1748,22 @@ class SlidingSyncHandler: to_token=to_token, ) else: - # TODO: Once we can figure out if we've sent a room down this connection before, - # we can return updates instead of the full required state. 
- raise NotImplementedError() + assert from_bound is not None + + # TODO: Limit the number of state events we're about to send down + # the room, if its too many we should change this to an + # `initial=True`? + deltas = await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=from_bound, + to_token=to_token.room_key, + ) + # TODO: Filter room state before fetching events + # TODO: Handle state resets where event_id is None + events = await self.store.get_events( + [d.event_id for d in deltas if d.event_id] + ) + room_state = {(s.type, s.state_key): s for s in events.values()} required_room_state: StateMap[EventBase] = {} if required_state_filter != StateFilter.none(): @@ -1863,7 +1909,7 @@ class SlidingSyncHandler: to_token: The point in the stream to sync up to. """ user_id = sync_config.user.to_string() - device_id = sync_config.device_id + device_id = sync_config.requester.device_id # Skip if the extension is not enabled if not to_device_request.enabled: @@ -1939,7 +1985,7 @@ class SlidingSyncHandler: from_token: The point in the stream to sync from. """ user_id = sync_config.user.to_string() - device_id = sync_config.device_id + device_id = sync_config.requester.device_id # Skip if the extension is not enabled if not e2ee_request.enabled: @@ -2094,3 +2140,235 @@ class SlidingSyncHandler: global_account_data_map=global_account_data_map, account_data_by_room_map=account_data_by_room_map, ) + + +class HaveSentRoomFlag(Enum): + """Flag for whether we have sent the room down a sliding sync connection. + + The valid state changes here are: + NEVER -> LIVE + LIVE -> PREVIOUSLY + PREVIOUSLY -> LIVE + """ + + # The room has never been sent down (or we have forgotten we have sent it + # down). + NEVER = 1 + + # We have previously sent the room down, but there are updates that we + # haven't sent down. + PREVIOUSLY = 2 + + # We have sent the room down and the client has received all updates. + LIVE = 3 + + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class HaveSentRoom: + """Whether we have sent the room down a sliding sync connection. + + Attributes: + status: Flag of if we have or haven't sent down the room + last_token: If the flag is `PREVIOUSLY` then this is non-null and + contains the last stream token of the last updates we sent down + the room, i.e. we still need to send everything since then to the + client. + """ + + status: HaveSentRoomFlag + last_token: Optional[RoomStreamToken] + + @staticmethod + def previously(last_token: RoomStreamToken) -> "HaveSentRoom": + """Constructor for `PREVIOUSLY` flag.""" + return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token) + + +HAVE_SENT_ROOM_NEVER = HaveSentRoom(HaveSentRoomFlag.NEVER, None) +HAVE_SENT_ROOM_LIVE = HaveSentRoom(HaveSentRoomFlag.LIVE, None) + + +@attr.s(auto_attribs=True) +class SlidingSyncConnectionStore: + """In-memory store of per-connection state, including what rooms we have + previously sent down a sliding sync connection. + + Note: This is NOT safe to run in a worker setup because connection positions will + point to different sets of rooms on different workers. e.g. for the same connection, + a connection position of 5 might have totally different states on worker A and + worker B. + + One complication that we need to deal with here is needing to handle requests being + resent, i.e. if we sent down a room in a response that the client received, we must + consider the room *not* sent when we get the request again. 
+ + This is handled by using an integer "token", which is returned to the client + as part of the sync token. For each connection we store a mapping from + tokens to the room states, and create a new entry when we send down new + rooms. + + Note that for any given sliding sync connection we will only store a maximum + of two different tokens: the previous token from the request and a new token + sent in the response. When we receive a request with a given token, we then + clear out all other entries with a different token. + + Attributes: + _connections: Mapping from `(user_id, conn_id)` to mapping of `token` + to mapping of room ID to `HaveSentRoom`. + """ + + # `(user_id, conn_id)` -> `token` -> `room_id` -> `HaveSentRoom` + _connections: Dict[Tuple[str, str], Dict[int, Dict[str, HaveSentRoom]]] = ( + attr.Factory(dict) + ) + + async def have_sent_room( + self, sync_config: SlidingSyncConfig, connection_token: int, room_id: str + ) -> HaveSentRoom: + """For the given user_id/conn_id/token, return whether we have + previously sent the room down + """ + + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.setdefault(conn_key, {}) + room_status = sync_statuses.get(connection_token, {}).get( + room_id, HAVE_SENT_ROOM_NEVER + ) + + return room_status + + async def record_rooms( + self, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken], + *, + sent_room_ids: StrCollection, + unsent_room_ids: StrCollection, + ) -> int: + """Record which rooms we have/haven't sent down in a new response + + Attributes: + sync_config + from_token: The since token from the request, if any + sent_room_ids: The set of room IDs that we have sent down as + part of this request (only needs to be ones we didn't + previously sent down). + unsent_room_ids: The set of room IDs that have had updates + since the `from_token`, but which were not included in + this request + """ + prev_connection_token = 0 + if from_token is not None: + prev_connection_token = from_token.connection_position + + # If there are no changes then this is a noop. + if not sent_room_ids and not unsent_room_ids: + return prev_connection_token + + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.setdefault(conn_key, {}) + + # Generate a new token, removing any existing entries in that token + # (which can happen if requests get resent). + new_store_token = prev_connection_token + 1 + sync_statuses.pop(new_store_token, None) + + # Copy over and update the room mappings. + new_room_statuses = dict(sync_statuses.get(prev_connection_token, {})) + + # Whether we have updated the `new_room_statuses`, if we don't by the + # end we can treat this as a noop. + have_updated = False + for room_id in sent_room_ids: + new_room_statuses[room_id] = HAVE_SENT_ROOM_LIVE + have_updated = True + + # Whether we add/update the entries for unsent rooms depends on the + # existing entry: + # - LIVE: We have previously sent down everything up to + # `last_room_token, so we update the entry to be `PREVIOUSLY` with + # `last_room_token`. + # - PREVIOUSLY: We have previously sent down everything up to *a* + # given token, so we don't need to update the entry. + # - NEVER: We have never previously sent down the room, and we haven't + # sent anything down this time either so we leave it as NEVER. + + # Work out the new state for unsent rooms that were `LIVE`. 
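+        # (Illustrative example, assuming integer connection positions: room
+        # "!a" was LIVE at position 5; it then gets an update but falls out of
+        # the requested list ranges, so under the new position 6 we record
+        # PREVIOUSLY(last_token=<the from_token's room key>), and a later
+        # request at position 6 backfills "!a" from exactly that token.)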
+ if from_token: + new_unsent_state = HaveSentRoom.previously(from_token.stream_token.room_key) + else: + new_unsent_state = HAVE_SENT_ROOM_NEVER + + for room_id in unsent_room_ids: + prev_state = new_room_statuses.get(room_id) + if prev_state is not None and prev_state.status == HaveSentRoomFlag.LIVE: + new_room_statuses[room_id] = new_unsent_state + have_updated = True + + if not have_updated: + return prev_connection_token + + sync_statuses[new_store_token] = new_room_statuses + + return new_store_token + + async def mark_token_seen( + self, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken], + ) -> None: + """We have received a request with the given token, so we can clear out + any other tokens associated with the connection. + + If there is no from token then we have started afresh, and so we delete + all tokens associated with the device. + """ + # Clear out any tokens for the connection that doesn't match the one + # from the request. + + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.pop(conn_key, {}) + if from_token is None: + return + + sync_statuses = { + connection_token: room_statuses + for connection_token, room_statuses in sync_statuses.items() + if connection_token == from_token.connection_position + } + if sync_statuses: + self._connections[conn_key] = sync_statuses + + @staticmethod + def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: + """Return a unique identifier for this connection. + + The first part is simply the user ID. + + The second part is generally a combination of device ID and conn_id. + However, both these two are optional (e.g. puppet access tokens don't + have device IDs), so this handles those edge cases. + + We use this over the raw `conn_id` to avoid clashes between different + clients that use the same `conn_id`. Imagine a user uses a web client + that uses `conn_id: main_sync_loop` and an Android client that also has + a `conn_id: main_sync_loop`. + """ + + user_id = sync_config.user.to_string() + + # Only one sliding sync connection is allowed per given conn_id (empty + # or not). + conn_id = sync_config.conn_id or "" + + if sync_config.requester.device_id: + return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}") + + if sync_config.requester.access_token_id: + # If we don't have a device, then the access token ID should be a + # stable ID. + return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}") + + # If we have neither then its likely an AS or some weird token. Either + # way we can just fail here. + raise Exception("Cannot use sliding sync with access token type") diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 7cf1f56435..bf3ac8d483 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -881,7 +881,6 @@ class SlidingSyncRestServlet(RestServlet): ) user = requester.user - device_id = requester.device_id timeout = parse_integer(request, "timeout", default=0) # Position in the stream @@ -902,11 +901,12 @@ class SlidingSyncRestServlet(RestServlet): sync_config = SlidingSyncConfig( user=user, - device_id=device_id, + requester=requester, # FIXME: Currently, we're just manually copying the fields from the - # `SlidingSyncBody` into the config. How can we gurantee into the future + # `SlidingSyncBody` into the config. How can we guarantee into the future # that we don't forget any? 
I would like something more structured like # `copy_attributes(from=body, to=config)` + conn_id=body.conn_id, lists=body.lists, room_subscriptions=body.room_subscriptions, extensions=body.extensions, diff --git a/synapse/server.py b/synapse/server.py index 4a3f9ff934..46b9d83a04 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -559,6 +559,7 @@ class HomeServer(metaclass=abc.ABCMeta): def get_sync_handler(self) -> SyncHandler: return SyncHandler(self) + @cache_in_self def get_sliding_sync_handler(self) -> SlidingSyncHandler: return SlidingSyncHandler(self) diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 036972ac25..da3ebe66b8 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -26,6 +26,8 @@ import attr from synapse.storage._base import SQLBaseStore from synapse.storage.database import LoggingTransaction +from synapse.storage.databases.main.stream import _filter_results_by_stream +from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache logger = logging.getLogger(__name__) @@ -156,3 +158,38 @@ class StateDeltasStore(SQLBaseStore): "get_max_stream_id_in_current_state_deltas", self._get_max_stream_id_in_current_state_deltas_txn, ) + + async def get_current_state_deltas_for_room( + self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken + ) -> List[StateDelta]: + """Get the state deltas between two tokens.""" + + def get_current_state_deltas_for_room_txn( + txn: LoggingTransaction, + ) -> List[StateDelta]: + sql = """ + SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id + FROM current_state_delta_stream + WHERE room_id = ? AND ? < stream_id AND stream_id <= ? + ORDER BY stream_id ASC + """ + txn.execute( + sql, (room_id, from_token.stream, to_token.get_max_stream_pos()) + ) + + return [ + StateDelta( + stream_id=row[1], + room_id=room_id, + event_type=row[2], + state_key=row[3], + event_id=row[4], + prev_event_id=row[5], + ) + for row in txn + if _filter_results_by_stream(from_token, to_token, row[0], row[1]) + ] + + return await self.db_pool.runInteraction( + "get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn + ) diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 479222a18d..f3141b05a0 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -35,6 +35,7 @@ from synapse.types import ( DeviceListUpdates, JsonDict, JsonMapping, + Requester, SlidingSyncStreamToken, StreamToken, UserID, @@ -109,7 +110,7 @@ class SlidingSyncConfig(SlidingSyncBody): """ user: UserID - device_id: Optional[str] + requester: Requester # Pydantic config class Config: diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 34e07ddac5..dfe3b1e0f7 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -120,6 +120,9 @@ class SlidingSyncBody(RequestBodyModel): Sliding Sync API request body. Attributes: + conn_id: An optional string to identify this connection to the server. + Only one sliding sync connection is allowed per given conn_id (empty + or not). lists: Sliding window API. A map of list key to list information (:class:`SlidingSyncList`). Max lists: 100. The list keys should be arbitrary strings which the client is using to refer to the list. 
Keep this @@ -343,6 +346,8 @@ class SlidingSyncBody(RequestBodyModel): e2ee: Optional[E2eeExtension] = None account_data: Optional[AccountDataExtension] = None + conn_id: Optional[str] + # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: lists: Optional[Dict[str, SlidingSyncList]] = None diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 2bbbd95a76..3e7b8f76a1 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -59,6 +59,7 @@ from synapse.types import ( StreamToken, UserID, ) +from synapse.types.handlers import SlidingSyncConfig from synapse.util import Clock from synapse.util.stringutils import random_string @@ -3676,13 +3677,52 @@ class SlidingSyncTestCase(SlidingSyncBase): # Make the incremental Sliding Sync request response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + # We only return updates but only if we've sent the room down the + # connection before. + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_incremental_sync_restart(self) -> None: + """ + Test `rooms.required_state` returns requested state events in the room during an + incremental sync, after a restart (and so the in memory caches are reset). + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Reset the in-memory cache + self.hs.get_sliding_sync_handler().connection_store._connections.clear() + + # Make the Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # If the cache has been cleared then we do expect the state to come down state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) - # The returned state doesn't change from initial to incremental sync. In the - # future, we will only return updates but only if we've sent the room down the - # connection before. self._assertRequiredStateIncludes( response_body["rooms"][room_id1]["required_state"], { @@ -4434,6 +4474,412 @@ class SlidingSyncTestCase(SlidingSyncBase): # `world_readable` but currently we don't support this. self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) + def test_rooms_required_state_incremental_sync_LIVE(self) -> None: + """Test that we only get state updates in incremental sync for rooms + we've already seen (LIVE). 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Name, ""], + ], + "timeline_limit": 0, + } + } + } + + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + + # Send a state event + self.helper.send_state( + room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok + ) + + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self.assertNotIn("initial", response_body["rooms"][room_id1]) + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + @parameterized.expand([(False,), (True,)]) + def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None: + """ + Test getting room data where we have previously sent down the room, but + we missed sending down some timeline events previously and so its status + is considered PREVIOUSLY. + + There are two versions of this test, one where there are more messages + than the timeline limit, and one where there isn't. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + timeline_limit = 5 + conn_id = "conn_id" + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": timeline_limit, + } + }, + "conn_id": "conn_id", + } + + # The first room gets sent down the initial sync + response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + # We now send down some events in room1 (depending on the test param). + expected_events = [] # The set of events in the timeline + if limited: + for _ in range(10): + resp = self.helper.send(room_id1, "msg1", tok=user1_tok) + expected_events.append(resp["event_id"]) + else: + resp = self.helper.send(room_id1, "msg1", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # A second messages happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. 
+ response_body, from_token = self.do_sync( + sync_body, since=initial_from_token, tok=user1_tok + ) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # FIXME: This is a hack to record that the first room wasn't sent down + # sync, as we don't implement that currently. + sliding_sync_handler = self.hs.get_sliding_sync_handler() + requester = self.get_success( + self.hs.get_auth().get_user_by_access_token(user1_tok) + ) + sync_config = SlidingSyncConfig( + user=requester.user, + requester=requester, + conn_id=conn_id, + ) + + parsed_initial_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, initial_from_token) + ) + connection_position = self.get_success( + sliding_sync_handler.connection_store.record_rooms( + sync_config, + parsed_initial_from_token, + sent_room_ids=[], + unsent_room_ids=[room_id1], + ) + ) + + # FIXME: Now fix up `from_token` with new connect position above. + parsed_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, from_token) + ) + parsed_from_token = SlidingSyncStreamToken( + stream_token=parsed_from_token.stream_token, + connection_position=connection_position, + ) + from_token = self.get_success(parsed_from_token.to_string(self.store)) + + # We now send another event to room1, so we should sync all the missing events. + resp = self.helper.send(room_id1, "msg2", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # This sync should contain the messages from room1 not yet sent down. + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + self.assertNotIn("initial", response_body["rooms"][room_id1]) + + self.assertEqual( + [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]], + expected_events[-timeline_limit:], + ) + self.assertEqual(response_body["rooms"][room_id1]["limited"], limited) + self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None) + + def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None: + """ + Test getting room data where we have previously sent down the room, but + we missed sending down some state previously and so its status is + considered PREVIOUSLY. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + conn_id = "conn_id" + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Name, ""], + ], + "timeline_limit": 0, + } + }, + "conn_id": "conn_id", + } + + # The first room gets sent down the initial sync + response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + # We now send down some state in room1 + resp = self.helper.send_state( + room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok + ) + name_change_id = resp["event_id"] + + # A second messages happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. 
+ response_body, from_token = self.do_sync( + sync_body, since=initial_from_token, tok=user1_tok + ) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # FIXME: This is a hack to record that the first room wasn't sent down + # sync, as we don't implement that currently. + sliding_sync_handler = self.hs.get_sliding_sync_handler() + requester = self.get_success( + self.hs.get_auth().get_user_by_access_token(user1_tok) + ) + sync_config = SlidingSyncConfig( + user=requester.user, + requester=requester, + conn_id=conn_id, + ) + + parsed_initial_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, initial_from_token) + ) + connection_position = self.get_success( + sliding_sync_handler.connection_store.record_rooms( + sync_config, + parsed_initial_from_token, + sent_room_ids=[], + unsent_room_ids=[room_id1], + ) + ) + + # FIXME: Now fix up `from_token` with new connect position above. + parsed_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, from_token) + ) + parsed_from_token = SlidingSyncStreamToken( + stream_token=parsed_from_token.stream_token, + connection_position=connection_position, + ) + from_token = self.get_success(parsed_from_token.to_string(self.store)) + + # We now send another event to room1, so we should sync all the missing state. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # This sync should contain the state changes from room1. + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + self.assertNotIn("initial", response_body["rooms"][room_id1]) + + # We should only see the name change. + self.assertEqual( + [ + ev["event_id"] + for ev in response_body["rooms"][room_id1]["required_state"] + ], + [name_change_id], + ) + + def test_rooms_required_state_incremental_sync_NEVER(self) -> None: + """ + Test getting `required_state` where we have NEVER sent down the room before + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Name, ""], + ], + "timeline_limit": 1, + } + }, + } + + # A message happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # We now send another event to room1, so we should send down the full + # room. + self.helper.send(room_id1, "msg2", tok=user1_tok) + + # This sync should contain the messages from room1 not yet sent down. 
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + + def test_rooms_timeline_incremental_sync_NEVER(self) -> None: + """ + Test getting timeline room data where we have NEVER sent down the room + before + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 5, + } + }, + } + + expected_events = [] + for _ in range(4): + resp = self.helper.send(room_id1, "msg", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # A message happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # We now send another event to room1 so it comes down sync + resp = self.helper.send(room_id1, "msg2", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # This sync should contain the messages from room1 not yet sent down. + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + self.assertEqual( + [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]], + expected_events, + ) + self.assertEqual(response_body["rooms"][room_id1]["limited"], True) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" From 34306be5aa7ebf7913dd28d048887802dc7e079b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2024 09:30:44 +0100 Subject: [PATCH 074/332] Only send rooms with updates down sliding sync (#17479) Rather than always including all rooms in range. Also adds a pre-filter to rooms that checks the stream change cache to see if anything might have happened. Based on #17447 --------- Co-authored-by: Eric Eastwood --- changelog.d/17479.misc | 1 + synapse/handlers/sliding_sync.py | 51 +++++++++++++- synapse/storage/databases/main/stream.py | 10 +++ synapse/types/handlers/__init__.py | 17 ++++- tests/rest/client/test_sync.py | 89 +++++++++++++++++------- 5 files changed, 138 insertions(+), 30 deletions(-) create mode 100644 changelog.d/17479.misc diff --git a/changelog.d/17479.misc b/changelog.d/17479.misc new file mode 100644 index 0000000000..4502f71662 --- /dev/null +++ b/changelog.d/17479.misc @@ -0,0 +1 @@ +Do not send down empty room entries down experimental sliding sync endpoint. 
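Taken together with the connection tracking from the previous patch, the
per-request room filter works roughly as sketched below. This is a minimal
illustrative sketch only: a plain enum and sets stand in for
`HaveSentRoomFlag`, the connection store, and the events stream change cache,
while the real logic lives in `wait_for_sync_for_user` in the diff that
follows.

    from enum import Enum
    from typing import AbstractSet, Dict, Set

    class Flag(Enum):
        NEVER = 1  # never sent down this connection
        PREVIOUSLY = 2  # sent before, and we still owe the client updates
        LIVE = 3  # client was fully up to date at the last position

    def rooms_to_send(
        statuses: Dict[str, Flag], changed_since_from_token: AbstractSet[str]
    ) -> Set[str]:
        """Pick which of the relevant rooms go into this incremental response."""
        # NEVER: the client knows nothing about the room, so it must be sent.
        # PREVIOUSLY: by definition there are updates we have not yet sent.
        should_send = {
            room_id
            for room_id, flag in statuses.items()
            if flag in (Flag.NEVER, Flag.PREVIOUSLY)
        }
        # LIVE rooms only need sending if something happened after the
        # from_token; the stream change cache answers that cheaply, erring
        # only on the side of false positives.
        should_send |= {r for r in statuses if r in changed_since_from_token}
        return should_send

A false positive from the change cache is harmless: the per-room handler then
produces an empty room result, and the new `RoomResult.__bool__` below keeps
it out of the response.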
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 2b74f1c9c9..84f2fa18ff 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -619,6 +619,51 @@ class SlidingSyncHandler: # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} + # Filter out rooms that haven't received updates and we've sent down + # previously. + if from_token: + rooms_should_send = set() + + # First we check if there are rooms that match a list/room + # subscription and have updates we need to send (i.e. either because + # we haven't sent the room down, or we have but there are missing + # updates). + for room_id in relevant_room_map: + status = await self.connection_store.have_sent_room( + sync_config, + from_token.connection_position, + room_id, + ) + if ( + # The room was never sent down before so the client needs to know + # about it regardless of any updates. + status.status == HaveSentRoomFlag.NEVER + # `PREVIOUSLY` literally means the "room was sent down before *AND* + # there are updates we haven't sent down" so we already know this + # room has updates. + or status.status == HaveSentRoomFlag.PREVIOUSLY + ): + rooms_should_send.add(room_id) + elif status.status == HaveSentRoomFlag.LIVE: + # We know that we've sent all updates up until `from_token`, + # so we just need to check if there have been updates since + # then. + pass + else: + assert_never(status.status) + + # We only need to check for new events since any state changes + # will also come down as new events. + rooms_that_have_updates = self.store.get_rooms_that_might_have_updates( + relevant_room_map.keys(), from_token.stream_token.room_key + ) + rooms_should_send.update(rooms_that_have_updates) + relevant_room_map = { + room_id: room_sync_config + for room_id, room_sync_config in relevant_room_map.items() + if room_id in rooms_should_send + } + @trace @tag_args async def handle_room(room_id: str) -> None: @@ -633,7 +678,9 @@ class SlidingSyncHandler: to_token=to_token, ) - rooms[room_id] = room_sync_result + # Filter out empty room results during incremental sync + if room_sync_result or not from_token: + rooms[room_id] = room_sync_result with start_active_span("sliding_sync.generate_room_entries"): await concurrently_execute(handle_room, relevant_room_map, 10) @@ -2198,7 +2245,7 @@ class SlidingSyncConnectionStore: a connection position of 5 might have totally different states on worker A and worker B. - One complication that we need to deal with here is needing to handle requests being + One complication that we need to deal with here is needing to handle requests being resent, i.e. if we sent down a room in a response that the client received, we must consider the room *not* sent when we get the request again. diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index b034361aec..4207e73c7f 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -2104,3 +2104,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return RoomStreamToken(stream=last_position.stream - 1) return None + + def get_rooms_that_might_have_updates( + self, room_ids: StrCollection, from_token: RoomStreamToken + ) -> StrCollection: + """Filters given room IDs down to those that might have updates, i.e. + removes rooms that definitely do not have updates. 
+ """ + return self._events_stream_cache.get_entities_changed( + room_ids, from_token.stream + ) diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index f3141b05a0..f26cc0e903 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -238,6 +238,17 @@ class SlidingSyncResult: notification_count: int highlight_count: int + def __bool__(self) -> bool: + return ( + # If this is the first time the client is seeing the room, we should not filter it out + # under any circumstance. + self.initial + # We need to let the client know if there are any new events + or bool(self.required_state) + or bool(self.timeline_events) + or bool(self.stripped_state) + ) + @attr.s(slots=True, frozen=True, auto_attribs=True) class SlidingWindowList: """ @@ -367,7 +378,11 @@ class SlidingSyncResult: to tell if the notifier needs to wait for more events when polling for events. """ - return bool(self.lists or self.rooms or self.extensions) + # We don't include `self.lists` here, as a) `lists` is always non-empty even if + # there are no changes, and b) since we're sorting rooms by `stream_ordering` of + # the latest activity, anything that would cause the order to change would end + # up in `self.rooms` and cause us to send down the change. + return bool(self.rooms or self.extensions) @staticmethod def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 3e7b8f76a1..5abf1041be 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -69,7 +69,6 @@ from tests.federation.transport.test_knocking import ( ) from tests.server import TimedOutException from tests.test_utils.event_injection import create_event, mark_event_as_partial_state -from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -1656,12 +1655,6 @@ class SlidingSyncTestCase(SlidingSyncBase): channel.json_body["rooms"][room_id]["timeline"], ) - # TODO: Once we remove `ops`, we should be able to add a `RoomResult.__bool__` to - # check if there are any updates since the `from_token`. - @skip_unless( - False, - "Once we remove ops from the Sliding Sync response, this test should pass", - ) def test_wait_for_new_data_timeout(self) -> None: """ Test to make sure that the Sliding Sync request waits for new data to arrive but @@ -1711,12 +1704,8 @@ class SlidingSyncTestCase(SlidingSyncBase): channel.await_result(timeout_ms=1200) self.assertEqual(channel.code, 200, channel.json_body) - # We still see rooms because that's how Sliding Sync lists work but we reached - # the timeout before seeing them - self.assertEqual( - [event["event_id"] for event in channel.json_body["rooms"].keys()], - [room_id], - ) + # There should be no room sent down. 
+        self.assertFalse(channel.json_body["rooms"])

     def test_filter_list(self) -> None:
         """
@@ -3556,19 +3545,7 @@ class SlidingSyncTestCase(SlidingSyncBase):
         response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

         # Nothing to see for this banned user in the room in the token range
-        self.assertIsNone(response_body["rooms"][room_id1].get("timeline"))
-        # No events returned in the timeline so nothing is "live"
-        self.assertEqual(
-            response_body["rooms"][room_id1]["num_live"],
-            0,
-            response_body["rooms"][room_id1],
-        )
-        # There aren't any more events to paginate to in this range
-        self.assertEqual(
-            response_body["rooms"][room_id1]["limited"],
-            False,
-            response_body["rooms"][room_id1],
-        )
+        self.assertIsNone(response_body["rooms"].get(room_id1))

     def test_rooms_no_required_state(self) -> None:
         """
@@ -3668,12 +3645,15 @@ class SlidingSyncTestCase(SlidingSyncBase):
                     # This one doesn't exist in the room
                     [EventTypes.Tombstone, ""],
                 ],
-                "timeline_limit": 0,
+                "timeline_limit": 1,
             }
         }
     }
     _, from_token = self.do_sync(sync_body, tok=user1_tok)

+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
         # Make the incremental Sliding Sync request
         response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)

@@ -4880,6 +4860,61 @@ class SlidingSyncTestCase(SlidingSyncBase):
         self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
         self.assertEqual(response_body["rooms"][room_id1]["initial"], True)

+    def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
+        """
+        Test that rooms with no updates are not returned in subsequent incremental
+        syncs.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Nothing has happened in the room, so the room should not come down
+        # /sync.
+        self.assertIsNone(response_body["rooms"].get(room_id1))
+
+    def test_empty_initial_room_comes_down_sync(self) -> None:
+        """
+        Test that rooms come down /sync even with empty required state and
+        timeline limit in initial sync.
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + + # Make the Sliding Sync request + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" From 808dab0699c2e5f52e9153650323ed93b0bc4c25 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 30 Jul 2024 09:51:24 +0100 Subject: [PATCH 075/332] Fix `failures` property in `/keys/query` (#17499) Fixes: https://github.com/element-hq/synapse/issues/17498 Fixes: https://github.com/element-hq/element-web/issues/27867 --- changelog.d/17499.bugfix | 1 + synapse/handlers/e2e_keys.py | 26 ++++++++++----- tests/handlers/test_e2e_keys.py | 59 +++++++++++++++++++++++++++++++-- 3 files changed, 75 insertions(+), 11 deletions(-) create mode 100644 changelog.d/17499.bugfix diff --git a/changelog.d/17499.bugfix b/changelog.d/17499.bugfix new file mode 100644 index 0000000000..5cb7b3c30e --- /dev/null +++ b/changelog.d/17499.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 668cec513b..f78e66ad0a 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -291,13 +291,20 @@ class E2eKeysHandler: # Only try and fetch keys for destinations that are not marked as # down. 
- filtered_destinations = await filter_destinations_by_retry_limiter( - remote_queries_not_in_cache.keys(), - self.clock, - self.store, - # Let's give an arbitrary grace period for those hosts that are - # only recently down - retry_due_within_ms=60 * 1000, + unfiltered_destinations = remote_queries_not_in_cache.keys() + filtered_destinations = set( + await filter_destinations_by_retry_limiter( + unfiltered_destinations, + self.clock, + self.store, + # Let's give an arbitrary grace period for those hosts that are + # only recently down + retry_due_within_ms=60 * 1000, + ) + ) + failures.update( + (dest, _NOT_READY_FOR_RETRY_FAILURE) + for dest in (unfiltered_destinations - filtered_destinations) ) await concurrently_execute( @@ -1641,6 +1648,9 @@ def _check_device_signature( raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE) +_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"} + + def _exception_to_failure(e: Exception) -> JsonDict: if isinstance(e, SynapseError): return {"status": e.code, "errcode": e.errcode, "message": str(e)} @@ -1649,7 +1659,7 @@ def _exception_to_failure(e: Exception) -> JsonDict: return {"status": e.code, "message": str(e)} if isinstance(e, NotRetryingDestination): - return {"status": 503, "message": "Not ready for retry"} + return _NOT_READY_FOR_RETRY_FAILURE # include ConnectionRefused and other errors # diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 0e6352ff4b..8a3dfdcf75 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -43,9 +43,7 @@ from tests.unittest import override_config class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.appservice_api = mock.AsyncMock() - return self.setup_test_homeserver( - federation_client=mock.Mock(), application_service_api=self.appservice_api - ) + return self.setup_test_homeserver(application_service_api=self.appservice_api) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() @@ -1224,6 +1222,61 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) + def test_query_devices_remote_down(self) -> None: + """Tests that querying keys for a remote user on an unreachable server returns + results in the "failures" property + """ + + remote_user_id = "@test:other" + local_user_id = "@test:test" + + # The backoff code treats time zero as special + self.reactor.advance(5) + + self.hs.get_federation_http_client().agent.request = mock.AsyncMock( # type: ignore[method-assign] + side_effect=Exception("boop") + ) + + e2e_handler = self.hs.get_e2e_keys_handler() + + query_result = self.get_success( + e2e_handler.query_devices( + { + "device_keys": {remote_user_id: []}, + }, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + + self.assertEqual( + query_result["failures"], + { + "other": { + "message": "Failed to send request: Exception: boop", + "status": 503, + } + }, + ) + + # Do it again: we should hit the backoff + query_result = self.get_success( + e2e_handler.query_devices( + { + "device_keys": {remote_user_id: []}, + }, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + + self.assertEqual( + query_result["failures"], + {"other": {"message": "Not ready for retry", "status": 503}}, + ) + @parameterized.expand( [ # The remote homeserver's response indicates that this user has 
0/1/2 devices. From 62ae56a4ac955a789d965cfb3bb642985c798666 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2024 10:54:11 +0100 Subject: [PATCH 076/332] Add some more opentracing to sliding sync (#17501) This will make it easier to see what it is doing in jaeger. --- changelog.d/17501.misc | 1 + synapse/handlers/sliding_sync.py | 18 +++++++++++++++--- synapse/rest/client/sync.py | 20 ++++++++++++++++++-- 3 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17501.misc diff --git a/changelog.d/17501.misc b/changelog.d/17501.misc new file mode 100644 index 0000000000..ba96472acb --- /dev/null +++ b/changelog.d/17501.misc @@ -0,0 +1 @@ +Add some opentracing tags and logging to the experimental sliding sync implementation. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 84f2fa18ff..73414dbf69 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -41,7 +41,7 @@ from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membe from synapse.events import EventBase from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations -from synapse.logging.opentracing import start_active_span, tag_args, trace +from synapse.logging.opentracing import log_kv, start_active_span, tag_args, trace from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary @@ -444,6 +444,7 @@ class SlidingSyncHandler: return result + @trace async def current_sync_for_user( self, sync_config: SlidingSyncConfig, @@ -682,8 +683,9 @@ class SlidingSyncHandler: if room_sync_result or not from_token: rooms[room_id] = room_sync_result - with start_active_span("sliding_sync.generate_room_entries"): - await concurrently_execute(handle_room, relevant_room_map, 10) + if relevant_room_map: + with start_active_span("sliding_sync.generate_room_entries"): + await concurrently_execute(handle_room, relevant_room_map, 10) extensions = await self.get_extensions_response( sync_config=sync_config, @@ -1161,6 +1163,7 @@ class SlidingSyncHandler: # return None + @trace async def filter_rooms( self, user: UserID, @@ -1284,6 +1287,7 @@ class SlidingSyncHandler: # Assemble a new sync room map but only with the `filtered_room_id_set` return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} + @trace async def sort_rooms( self, sync_room_map: Dict[str, _RoomMembershipForUser], @@ -1492,6 +1496,10 @@ class SlidingSyncHandler: else: assert_never(room_status.status) + log_kv({"sliding_sync.room_status": room_status}) + + log_kv({"sliding_sync.from_bound": from_bound, "sliding_sync.initial": initial}) + # Assemble the list of timeline events # # FIXME: It would be nice to make the `rooms` response more uniform regardless of @@ -1890,6 +1898,7 @@ class SlidingSyncHandler: highlight_count=0, ) + @trace async def get_extensions_response( self, sync_config: SlidingSyncConfig, @@ -1942,6 +1951,7 @@ class SlidingSyncHandler: account_data=account_data_response, ) + @trace async def get_to_device_extension_response( self, sync_config: SlidingSyncConfig, @@ -2016,6 +2026,7 @@ class SlidingSyncHandler: events=messages, ) + @trace async def get_e2ee_extension_response( self, sync_config: SlidingSyncConfig, @@ -2066,6 +2077,7 @@ class SlidingSyncHandler: device_unused_fallback_key_types=device_unused_fallback_key_types, ) + 
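The tracing hooks added throughout this patch compose as follows; a minimal sketch assuming Synapse's `synapse.logging.opentracing` helpers (the function itself is illustrative, not part of the patch):

```python
from synapse.logging.opentracing import log_kv, set_tag, trace


@trace  # wraps the call in an opentracing span named after the function
async def sort_rooms_example(room_ids: list) -> list:
    # Tags are indexed, so requests can be searched for in Jaeger.
    set_tag("sliding_sync.room_count", len(room_ids))
    # log_kv attaches structured key/value logs to the active span.
    log_kv({"sliding_sync.room_ids": room_ids})
    return sorted(room_ids)
```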
@trace async def get_account_data_extension_response( self, sync_config: SlidingSyncConfig, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index bf3ac8d483..ccfce6bd53 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -52,7 +52,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import trace_with_opname +from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname from synapse.rest.admin.experimental_features import ExperimentalFeature from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken from synapse.types.rest.client import SlidingSyncBody @@ -897,7 +897,23 @@ class SlidingSyncRestServlet(RestServlet): # by filter ID. For now, we will just prototype with always passing everything # in. body = parse_and_validate_json_object_from_request(request, SlidingSyncBody) - logger.info("Sliding sync request: %r", body) + + # Tag and log useful data to differentiate requests. + set_tag("sliding_sync.conn_id", body.conn_id or "") + log_kv( + { + "sliding_sync.lists": { + list_name: { + "ranges": list_config.ranges, + "timeline_limit": list_config.timeline_limit, + } + for list_name, list_config in (body.lists or {}).items() + }, + "sliding_sync.room_subscriptions": list( + (body.room_subscriptions or {}).keys() + ), + } + ) sync_config = SlidingSyncConfig( user=user, From be726724a828a91d26a55187e3988ccb47dd40c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 11:44:54 +0100 Subject: [PATCH 077/332] Bump ruff from 0.5.4 to 0.5.5 (#17494) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 945b91e022..417f6850b8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2358,29 +2358,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.4" +version = "0.5.5" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"}, - {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"}, - {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"}, - {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"}, - {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"}, - {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"}, - {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"}, - {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"}, - {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"}, - {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"}, - {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"}, - {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"}, + {file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"}, + {file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"}, + {file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"}, + {file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"}, + {file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"}, + {file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"}, + {file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"}, + {file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"}, + {file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"}, + {file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"}, + {file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"}, + {file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"}, ] [[package]] @@ -3215,4 +3215,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "e65fbd044230964cae8810c84289bcf0bc43b27532ea5a5ef8843eab4f6514af" +content-hash = "5f458ce53b7469844af2e0c5a9c5ef720736de5f080c4eb8d3a0e60286424f44" diff --git a/pyproject.toml b/pyproject.toml index 1adf8e087f..fe6289839c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -322,7 +322,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.5.4" +ruff = "0.5.5" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From c56b070e6fdc1d3aa3452ea81fd13d0eb9f82aa8 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 30 Jul 2024 15:23:23 +0100 Subject: [PATCH 078/332] Upgrade locked dependency on Twisted to 24.7.0rc1. (#17502) I also update the tests and HTTP Proxy code to fix it for this new Twisted release. Pulls in fix for https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7 Signed-off-by: Olivier 'reivilibre --- changelog.d/17502.misc | 1 + poetry.lock | 63 ++++++++++++--------------------- synapse/http/proxy.py | 12 +++++-- synapse/http/server.py | 4 ++- synapse/http/site.py | 2 +- tests/rest/client/test_login.py | 5 ++- tests/server.py | 26 +++++++++++--- tests/test_server.py | 9 +++-- 8 files changed, 65 insertions(+), 57 deletions(-) create mode 100644 changelog.d/17502.misc diff --git a/changelog.d/17502.misc b/changelog.d/17502.misc new file mode 100644 index 0000000000..bf1da4e044 --- /dev/null +++ b/changelog.d/17502.misc @@ -0,0 +1 @@ +Upgrade locked dependency on Twisted to 24.7.0rc1. 
\ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 19393bb6b3..8f64ada346 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -836,18 +836,21 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec [[package]] name = "incremental" -version = "22.10.0" -description = "\"A small library that versions your Python projects.\"" +version = "24.7.2" +description = "A small library that versions your Python projects." optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"}, - {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"}, + {file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"}, + {file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"}, ] +[package.dependencies] +setuptools = ">=61.0" +tomli = {version = "*", markers = "python_version < \"3.11\""} + [package.extras] -mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"] -scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] +scripts = ["click (>=6.0)"] [[package]] name = "isort" @@ -2726,13 +2729,13 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "24.3.0" +version = "24.7.0rc1" description = "An asynchronous networking framework written in Python" optional = false python-versions = ">=3.8.0" files = [ - {file = "twisted-24.3.0-py3-none-any.whl", hash = "sha256:039f2e6a49ab5108abd94de187fa92377abe5985c7a72d68d0ad266ba19eae63"}, - {file = "twisted-24.3.0.tar.gz", hash = "sha256:6b38b6ece7296b5e122c9eb17da2eeab3d98a198f50ca9efd00fb03e5b4fd4ae"}, + {file = "twisted-24.7.0rc1-py3-none-any.whl", hash = "sha256:f37d6656fe4e2871fab29d8952ae90bd6ca8b48a9e4dfa1b348f4cd62e6ba0bb"}, + {file = "twisted-24.7.0rc1.tar.gz", hash = "sha256:bbc4a2193ca34cfa32f626300746698a6d70fcd77d9c0b79a664c347e39634fc"}, ] [package.dependencies] @@ -2741,48 +2744,26 @@ automat = ">=0.8.0" constantly = ">=15.1" hyperlink = ">=17.1.1" idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""} -incremental = ">=22.10.0" +incremental = ">=24.7.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} -twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} typing-extensions = ">=4.2.0" zope-interface = ">=5" [package.extras] -all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"] +all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 
(!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] -dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] +dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"] dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"] -gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] +gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] -macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] -mypy = ["mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] -osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] +macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] +mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"] +osx-platform = ["appdirs (>=1.4.0)", 
"appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"] -windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"] - -[[package]] -name = "twisted-iocpsupport" -version = "1.0.2" -description = "An extension for use in the twisted I/O Completion Ports reactor." -optional = false -python-versions = "*" -files = [ - {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"}, - {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"}, -] +windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", 
"pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"] [[package]] name = "txredisapi" diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index 5b5ded757b..97aa429e7d 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -62,6 +62,15 @@ HOP_BY_HOP_HEADERS = { "Upgrade", } +if hasattr(Headers, "_canonicalNameCaps"): + # Twisted < 24.7.0rc1 + _canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined] +else: + # Twisted >= 24.7.0rc1 + # But note that `_encodeName` still exists on prior versions, + # it just encodes differently + _canonicalHeaderName = Headers()._encodeName + def parse_connection_header_value( connection_header_value: Optional[bytes], @@ -85,11 +94,10 @@ def parse_connection_header_value( The set of header names that should not be copied over from the remote response. The keys are capitalized in canonical capitalization. """ - headers = Headers() extra_headers_to_remove: Set[str] = set() if connection_header_value: extra_headers_to_remove = { - headers._canonicalNameCaps(connection_option.strip()).decode("ascii") + _canonicalHeaderName(connection_option.strip()).decode("ascii") for connection_option in connection_header_value.split(b",") } diff --git a/synapse/http/server.py b/synapse/http/server.py index 211795dc39..0d0c610b28 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -74,6 +74,7 @@ from synapse.api.errors import ( from synapse.config.homeserver import HomeServerConfig from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet +from synapse.types import ISynapseReactor from synapse.util import json_encoder from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable @@ -868,7 +869,8 @@ async def _async_write_json_to_request_in_thread( with start_active_span("encode_json_response"): span = active_span() - json_str = await defer_to_thread(request.reactor, encode, span) + reactor: ISynapseReactor = request.reactor # type: ignore + json_str = await defer_to_thread(reactor, encode, span) _write_bytes_to_request(request, json_str) diff --git a/synapse/http/site.py b/synapse/http/site.py index a5b5780679..af169ba51e 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -683,7 +683,7 @@ class SynapseSite(ProxySite): self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") - def log(self, request: SynapseRequest) -> None: + def log(self, request: SynapseRequest) -> None: # type: ignore[override] pass diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 3fb77fd9dd..2b1e44381b 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -969,9 +969,8 @@ class CASTestCase(unittest.HomeserverTestCase): # Test that the response is HTML. 
self.assertEqual(channel.code, 200, channel.result) content_type_header_value = "" - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type_header_value = header[1].decode("utf8") + for header in channel.headers.getRawHeaders("Content-Type", []): + content_type_header_value = header self.assertTrue(content_type_header_value.startswith("text/html")) diff --git a/tests/server.py b/tests/server.py index f1cd0f76be..38ca095073 100644 --- a/tests/server.py +++ b/tests/server.py @@ -198,17 +198,35 @@ class FakeChannel: def headers(self) -> Headers: if not self.result: raise Exception("No result yet.") - h = Headers() - for i in self.result["headers"]: - h.addRawHeader(*i) + + h = self.result["headers"] + assert isinstance(h, Headers) return h def writeHeaders( - self, version: bytes, code: bytes, reason: bytes, headers: Headers + self, + version: bytes, + code: bytes, + reason: bytes, + headers: Union[Headers, List[Tuple[bytes, bytes]]], ) -> None: self.result["version"] = version self.result["code"] = code self.result["reason"] = reason + + if isinstance(headers, list): + # Support prior to Twisted 24.7.0rc1 + new_headers = Headers() + for k, v in headers: + assert isinstance(k, bytes), f"key is not of type bytes: {k!r}" + assert isinstance(v, bytes), f"value is not of type bytes: {v!r}" + new_headers.addRawHeader(k, v) + headers = new_headers + + assert isinstance( + headers, Headers + ), f"headers are of the wrong type: {headers!r}" + self.result["headers"] = headers def write(self, data: bytes) -> None: diff --git a/tests/test_server.py b/tests/test_server.py index 0910ea5f28..9ff2589497 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -392,8 +392,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): ) self.assertEqual(channel.code, 301) - headers = channel.result["headers"] - location_headers = [v for k, v in headers if k == b"Location"] + location_headers = channel.headers.getRawHeaders(b"Location", []) self.assertEqual(location_headers, [b"/look/an/eagle"]) def test_redirect_exception_with_cookie(self) -> None: @@ -415,10 +414,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): ) self.assertEqual(channel.code, 304) - headers = channel.result["headers"] - location_headers = [v for k, v in headers if k == b"Location"] + headers = channel.headers + location_headers = headers.getRawHeaders(b"Location", []) self.assertEqual(location_headers, [b"/no/over/there"]) - cookies_headers = [v for k, v in headers if k == b"Set-Cookie"] + cookies_headers = headers.getRawHeaders(b"Set-Cookie", []) self.assertEqual(cookies_headers, [b"session=yespls"]) def test_head_request(self) -> None: From dcad81082c2d34f734a6cc308347336396da9e19 Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Tue, 30 Jul 2024 16:16:35 +0100 Subject: [PATCH 079/332] 1.111.1 --- CHANGES.md | 20 ++++++++++++++++++++ changelog.d/17502.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 27 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17502.misc diff --git a/CHANGES.md b/CHANGES.md index 0a2b816ed1..0997083c40 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,23 @@ +# Synapse 1.111.1 (2024-07-30) + +This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). 
+ +This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. +If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. + +With that said, despite being a high severity issue, we consider it unlikely that Synapse installations will be affected. +The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. + +Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. + +**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. Please manually install the new version of Twisted using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. + + +### Internal Changes + +- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502)) + + # Synapse 1.111.0 (2024-07-16) No significant changes since 1.111.0rc2. diff --git a/changelog.d/17502.misc b/changelog.d/17502.misc deleted file mode 100644 index bf1da4e044..0000000000 --- a/changelog.d/17502.misc +++ /dev/null @@ -1 +0,0 @@ -Upgrade locked dependency on Twisted to 24.7.0rc1. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 0470e25f2d..11dea73ed0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.111.1) stable; urgency=medium + + * New Synapse release 1.111.1. + + -- Synapse Packaging team Tue, 30 Jul 2024 16:13:52 +0100 + matrix-synapse-py3 (1.111.0) stable; urgency=medium * New Synapse release 1.111.0. diff --git a/pyproject.toml b/pyproject.toml index 0f040fc612..c285d10640 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0" +version = "1.111.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From e4868f8a1e0e4e5898facf8819596fda5c8e8721 Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Tue, 30 Jul 2024 16:23:58 +0100 Subject: [PATCH 080/332] Add bold emphasis to some parts of the changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0997083c40..561638ecbc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,12 +5,12 @@ This security release is to update our locked dependency on Twisted to 24.7.0rc1 This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. 
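For concreteness, the risky setup looks like this on the wire: with pipelining, a proxy writes a second request on the upstream connection before reading the first response, and HTTP/1.1 requires the responses to come back in request order. A sketch (hosts and tokens are made up for illustration):

```python
# Two pipelined requests from different users on one upstream TCP connection.
pipelined_bytes = (
    b"GET /_matrix/client/v3/account/whoami HTTP/1.1\r\n"
    b"Host: synapse.example\r\n"
    b"Authorization: Bearer alice_token\r\n"
    b"\r\n"
    b"GET /_matrix/client/v3/account/whoami HTTP/1.1\r\n"
    b"Host: synapse.example\r\n"
    b"Authorization: Bearer bob_token\r\n"
    b"\r\n"
)
# The Twisted bug could emit the two responses out of order, so the proxy
# would hand Alice the body that was generated for Bob's request.
```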
-With that said, despite being a high severity issue, we consider it unlikely that Synapse installations will be affected. +With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**. The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. -**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. Please manually install the new version of Twisted using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. +**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. ### Internal Changes From 8b449a8ce61bce4a231717ab2895db7d6d01a4c6 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 30 Jul 2024 15:23:23 +0100 Subject: [PATCH 081/332] Upgrade locked dependency on Twisted to 24.7.0rc1. (#17502) I also update the tests and HTTP Proxy code to fix it for this new Twisted release. Pulls in fix for https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7 Signed-off-by: Olivier 'reivilibre --- changelog.d/17502.misc | 1 + poetry.lock | 63 ++++++++++++--------------------- synapse/http/proxy.py | 12 +++++-- synapse/http/server.py | 4 ++- synapse/http/site.py | 2 +- tests/rest/client/test_login.py | 5 ++- tests/server.py | 26 +++++++++++--- tests/test_server.py | 9 +++-- 8 files changed, 65 insertions(+), 57 deletions(-) create mode 100644 changelog.d/17502.misc diff --git a/changelog.d/17502.misc b/changelog.d/17502.misc new file mode 100644 index 0000000000..bf1da4e044 --- /dev/null +++ b/changelog.d/17502.misc @@ -0,0 +1 @@ +Upgrade locked dependency on Twisted to 24.7.0rc1. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 945b91e022..7359930983 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -821,18 +821,21 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec [[package]] name = "incremental" -version = "22.10.0" -description = "\"A small library that versions your Python projects.\"" +version = "24.7.2" +description = "A small library that versions your Python projects." 
optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"}, - {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"}, + {file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"}, + {file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"}, ] +[package.dependencies] +setuptools = ">=61.0" +tomli = {version = "*", markers = "python_version < \"3.11\""} + [package.extras] -mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"] -scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] +scripts = ["click (>=6.0)"] [[package]] name = "isort" @@ -2711,13 +2714,13 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "24.3.0" +version = "24.7.0rc1" description = "An asynchronous networking framework written in Python" optional = false python-versions = ">=3.8.0" files = [ - {file = "twisted-24.3.0-py3-none-any.whl", hash = "sha256:039f2e6a49ab5108abd94de187fa92377abe5985c7a72d68d0ad266ba19eae63"}, - {file = "twisted-24.3.0.tar.gz", hash = "sha256:6b38b6ece7296b5e122c9eb17da2eeab3d98a198f50ca9efd00fb03e5b4fd4ae"}, + {file = "twisted-24.7.0rc1-py3-none-any.whl", hash = "sha256:f37d6656fe4e2871fab29d8952ae90bd6ca8b48a9e4dfa1b348f4cd62e6ba0bb"}, + {file = "twisted-24.7.0rc1.tar.gz", hash = "sha256:bbc4a2193ca34cfa32f626300746698a6d70fcd77d9c0b79a664c347e39634fc"}, ] [package.dependencies] @@ -2726,48 +2729,26 @@ automat = ">=0.8.0" constantly = ">=15.1" hyperlink = ">=17.1.1" idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""} -incremental = ">=22.10.0" +incremental = ">=24.7.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} -twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} typing-extensions = ">=4.2.0" zope-interface = ">=5" [package.extras] -all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"] +all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] -dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] +dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"] dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor 
(>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"] -gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] +gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] -macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] -mypy = ["mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] -osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] +macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] +mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"] +osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", 
"service-identity (>=18.1.0)", "service-identity (>=18.1.0)"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"] -windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"] - -[[package]] -name = "twisted-iocpsupport" -version = "1.0.2" -description = "An extension for use in the twisted I/O Completion Ports reactor." -optional = false -python-versions = "*" -files = [ - {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"}, - {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"}, -] +windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"] [[package]] name = "txredisapi" diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index 5b5ded757b..97aa429e7d 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -62,6 +62,15 @@ HOP_BY_HOP_HEADERS = { "Upgrade", } +if hasattr(Headers, "_canonicalNameCaps"): + # Twisted < 24.7.0rc1 + _canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined] +else: + # Twisted >= 24.7.0rc1 + # But note that `_encodeName` still exists on prior 
versions, + # it just encodes differently + _canonicalHeaderName = Headers()._encodeName + def parse_connection_header_value( connection_header_value: Optional[bytes], @@ -85,11 +94,10 @@ def parse_connection_header_value( The set of header names that should not be copied over from the remote response. The keys are capitalized in canonical capitalization. """ - headers = Headers() extra_headers_to_remove: Set[str] = set() if connection_header_value: extra_headers_to_remove = { - headers._canonicalNameCaps(connection_option.strip()).decode("ascii") + _canonicalHeaderName(connection_option.strip()).decode("ascii") for connection_option in connection_header_value.split(b",") } diff --git a/synapse/http/server.py b/synapse/http/server.py index 211795dc39..0d0c610b28 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -74,6 +74,7 @@ from synapse.api.errors import ( from synapse.config.homeserver import HomeServerConfig from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet +from synapse.types import ISynapseReactor from synapse.util import json_encoder from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable @@ -868,7 +869,8 @@ async def _async_write_json_to_request_in_thread( with start_active_span("encode_json_response"): span = active_span() - json_str = await defer_to_thread(request.reactor, encode, span) + reactor: ISynapseReactor = request.reactor # type: ignore + json_str = await defer_to_thread(reactor, encode, span) _write_bytes_to_request(request, json_str) diff --git a/synapse/http/site.py b/synapse/http/site.py index a5b5780679..af169ba51e 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -683,7 +683,7 @@ class SynapseSite(ProxySite): self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") - def log(self, request: SynapseRequest) -> None: + def log(self, request: SynapseRequest) -> None: # type: ignore[override] pass diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 3fb77fd9dd..2b1e44381b 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -969,9 +969,8 @@ class CASTestCase(unittest.HomeserverTestCase): # Test that the response is HTML. 
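The `FakeChannel.writeHeaders` change in the hunk below accepts both calling conventions Twisted has used: the old list of `(name, value)` tuples and the new `Headers` object. A minimal standalone sketch of that normalization, outside the test harness:

```python
from typing import List, Tuple, Union

from twisted.web.http_headers import Headers


def normalize_headers(
    headers: Union[Headers, List[Tuple[bytes, bytes]]],
) -> Headers:
    """Accept either calling convention; always return a Headers object."""
    if isinstance(headers, list):
        # Twisted < 24.7.0rc1 passed raw (name, value) tuples.
        new_headers = Headers()
        for k, v in headers:
            new_headers.addRawHeader(k, v)
        return new_headers
    return headers


assert normalize_headers([(b"Content-Type", b"text/html")]).getRawHeaders(
    b"Content-Type"
) == [b"text/html"]
```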
self.assertEqual(channel.code, 200, channel.result) content_type_header_value = "" - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type_header_value = header[1].decode("utf8") + for header in channel.headers.getRawHeaders("Content-Type", []): + content_type_header_value = header self.assertTrue(content_type_header_value.startswith("text/html")) diff --git a/tests/server.py b/tests/server.py index 85602e6953..3e377585ce 100644 --- a/tests/server.py +++ b/tests/server.py @@ -198,17 +198,35 @@ class FakeChannel: def headers(self) -> Headers: if not self.result: raise Exception("No result yet.") - h = Headers() - for i in self.result["headers"]: - h.addRawHeader(*i) + + h = self.result["headers"] + assert isinstance(h, Headers) return h def writeHeaders( - self, version: bytes, code: bytes, reason: bytes, headers: Headers + self, + version: bytes, + code: bytes, + reason: bytes, + headers: Union[Headers, List[Tuple[bytes, bytes]]], ) -> None: self.result["version"] = version self.result["code"] = code self.result["reason"] = reason + + if isinstance(headers, list): + # Support prior to Twisted 24.7.0rc1 + new_headers = Headers() + for k, v in headers: + assert isinstance(k, bytes), f"key is not of type bytes: {k!r}" + assert isinstance(v, bytes), f"value is not of type bytes: {v!r}" + new_headers.addRawHeader(k, v) + headers = new_headers + + assert isinstance( + headers, Headers + ), f"headers are of the wrong type: {headers!r}" + self.result["headers"] = headers def write(self, data: bytes) -> None: diff --git a/tests/test_server.py b/tests/test_server.py index 0910ea5f28..9ff2589497 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -392,8 +392,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): ) self.assertEqual(channel.code, 301) - headers = channel.result["headers"] - location_headers = [v for k, v in headers if k == b"Location"] + location_headers = channel.headers.getRawHeaders(b"Location", []) self.assertEqual(location_headers, [b"/look/an/eagle"]) def test_redirect_exception_with_cookie(self) -> None: @@ -415,10 +414,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): ) self.assertEqual(channel.code, 304) - headers = channel.result["headers"] - location_headers = [v for k, v in headers if k == b"Location"] + headers = channel.headers + location_headers = headers.getRawHeaders(b"Location", []) self.assertEqual(location_headers, [b"/no/over/there"]) - cookies_headers = [v for k, v in headers if k == b"Set-Cookie"] + cookies_headers = headers.getRawHeaders(b"Set-Cookie", []) self.assertEqual(cookies_headers, [b"session=yespls"]) def test_head_request(self) -> None: From 37f9876ccfdd9963cda4ff802882b9eec037877a Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Tue, 30 Jul 2024 17:24:09 +0100 Subject: [PATCH 082/332] 1.112.0 --- CHANGES.md | 25 +++++++++++++++++++++++++ changelog.d/17502.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 32 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17502.misc diff --git a/CHANGES.md b/CHANGES.md index f869674ace..972b7ee3ed 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,30 @@ +# Synapse 1.112.0 (2024-07-30) + +This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). 
+
+Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest
+of the changes in Synapse 1.112.0.
+
+This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can
+send responses to the wrong HTTP request.
+If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the
+wrong user, severely harming confidentiality.
+
+That said, despite this being a high-severity issue, **we consider it unlikely that Synapse installations will be affected**.
+The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response
+latencies would be increased as users' responses would be artificially blocked behind other users' slow requests).
+Further, Nginx and HAProxy, two common reverse proxies, do not appear to support configuring their
+upstreams to use HTTP pipelining and thus would not be affected.
+For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
+
+Despite that, we cannot rule out that some installations may exist with this unusual setup, and so we
+are releasing this security update today.
+
+**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted.
+**Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`.
+Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade
+Twisted to a patched version because it is only a release candidate at this time.
+
+### Internal Changes
+
+- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
+
+
 # Synapse 1.112.0rc1 (2024-07-23)
 
+Please note that this release candidate does not include the security dependency update
+included in version 1.111.1, as this release candidate was published before 1.111.1.
+The same security fix can be found in the full release of 1.112.0.
+
 ### Features
 
 - Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
 
diff --git a/changelog.d/17502.misc b/changelog.d/17502.misc
deleted file mode 100644
index bf1da4e044..0000000000
--- a/changelog.d/17502.misc
+++ /dev/null
@@ -1 +0,0 @@
-Upgrade locked dependency on Twisted to 24.7.0rc1.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 5209b9f5fd..873fde44a8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.112.0) stable; urgency=medium
+
+  * New Synapse release 1.112.0.
+
+ -- Synapse Packaging team  Tue, 30 Jul 2024 17:15:48 +0100
+
 matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.112.0rc1.
diff --git a/pyproject.toml b/pyproject.toml index 0b5dc418e4..521b279390 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.112.0rc1" +version = "1.112.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 2f6b86e79adf9ed969b492a7c91a19d08f345f2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:32:16 +0100 Subject: [PATCH 083/332] Bump serde_json from 1.0.120 to 1.0.121 (#17493) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9adfcbdc3..333499e197 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,11 +505,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] From dbc2290cbe7e32634df8b7cf600dbabf93876413 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:32:49 +0100 Subject: [PATCH 084/332] Bump bcrypt from 4.1.3 to 4.2.0 (#17495) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 56 ++++++++++++++++++++++++++--------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/poetry.lock b/poetry.lock index e4d5097724..1bf13dbece 100644 --- a/poetry.lock +++ b/poetry.lock @@ -67,38 +67,38 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] [[package]] name = "bcrypt" -version = "4.1.3" +version = "4.2.0" description = "Modern password hashing for your software and your servers" optional = false python-versions = ">=3.7" files = [ - {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, - {file = 
"bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, - {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, - {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, - {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, - {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, - {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, - {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, - {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, - {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, - {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, + {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"}, + {file = 
"bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"}, + {file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"}, + {file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"}, + {file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"}, + {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"}, + {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"}, + {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"}, ] [package.extras] From 7e997fb8b14d2218465d18c6009cdea2200f15e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:33:18 +0100 Subject: [PATCH 085/332] Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722 (#17496) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1bf13dbece..02f56257fa 
100644 --- a/poetry.lock +++ b/poetry.lock @@ -2875,13 +2875,13 @@ files = [ [[package]] name = "types-pyopenssl" -version = "24.1.0.20240425" +version = "24.1.0.20240722" description = "Typing stubs for pyOpenSSL" optional = false python-versions = ">=3.8" files = [ - {file = "types-pyOpenSSL-24.1.0.20240425.tar.gz", hash = "sha256:0a7e82626c1983dc8dc59292bf20654a51c3c3881bcbb9b337c1da6e32f0204e"}, - {file = "types_pyOpenSSL-24.1.0.20240425-py3-none-any.whl", hash = "sha256:f51a156835555dd2a1f025621e8c4fbe7493470331afeef96884d1d29bf3a473"}, + {file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"}, + {file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"}, ] [package.dependencies] From f76dc9923c8f8d912ac0d29ef3321c6e196ec843 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:33:43 +0100 Subject: [PATCH 086/332] Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726 (#17497) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 02f56257fa..7d8334515a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2915,13 +2915,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "70.1.0.20240627" +version = "71.1.0.20240726" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"}, - {file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"}, + {file = "types-setuptools-71.1.0.20240726.tar.gz", hash = "sha256:85ba28e9461bb1be86ebba4db0f1c2408f2b11115b1966334ea9dc464e29303e"}, + {file = "types_setuptools-71.1.0.20240726-py3-none-any.whl", hash = "sha256:a7775376f36e0ff09bcad236bf265777590a66b11623e48c20bfc30f1444ea36"}, ] [[package]] From b221f0b84b984d236ea11383cc21f6d07ca3c2ec Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Jul 2024 12:49:55 -0500 Subject: [PATCH 087/332] Sliding Sync: Add receipts extension (MSC3960) (#17489) [MSC3960](https://github.com/matrix-org/matrix-spec-proposals/pull/3960): Receipts extension Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17489.feature | 1 + synapse/handlers/receipts.py | 4 +- synapse/handlers/sliding_sync.py | 268 ++++-- synapse/rest/client/sync.py | 6 + synapse/types/handlers/__init__.py | 20 +- synapse/types/rest/client/__init__.py | 18 + tests/rest/client/test_sync.py | 1135 +++++++++++++++++++------ 7 files changed, 1127 insertions(+), 325 deletions(-) create mode 100644 changelog.d/17489.feature diff --git a/changelog.d/17489.feature b/changelog.d/17489.feature new file mode 100644 index 0000000000..5ace1e675e --- /dev/null +++ b/changelog.d/17489.feature @@ -0,0 +1 @@ +Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
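For orientation before the implementation diffs below: a minimal sketch of the
request and response shapes this commit adds for the receipts extension. The
room ID, event ID, user ID, and list name are placeholder assumptions, and the
unstable endpoint path (`/_matrix/client/unstable/org.matrix.msc3575/sync`)
follows Synapse's experimental MSC3575 implementation rather than anything
defined in this patch.

    # Hypothetical Sliding Sync request body opting in to the receipts extension.
    sync_body = {
        "lists": {
            "all-rooms": {
                "ranges": [[0, 10]],
                "required_state": [],
                "timeline_limit": 1,
            },
        },
        "extensions": {
            "receipts": {
                "enabled": True,
                # The defaults, written out explicitly: apply the extension to
                # every sliding window list and every room subscription.
                "lists": ["*"],
                "rooms": ["*"],
            }
        },
    }

    # Shape of the matching response fragment added by this commit: receipts
    # are grouped per room under `extensions.receipts.rooms` as `m.receipt` EDUs.
    example_response_fragment = {
        "extensions": {
            "receipts": {
                "rooms": {
                    "!room:example.org": {
                        "type": "m.receipt",
                        "content": {
                            "$event:example.org": {
                                "m.read": {"@user:example.org": {"ts": 1722349200000}}
                            }
                        },
                    }
                }
            }
        }
    }

    for room_id, edu in example_response_fragment["extensions"]["receipts"]["rooms"].items():
        assert edu["type"] == "m.receipt"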
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 8674a8fcdd..d04c76be2a 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -286,8 +286,10 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, + to_key: Optional[MultiWriterStreamToken] = None, ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: - to_key = self.get_current_key() + if to_key is None: + to_key = self.get_current_key() if from_key == to_key: return [], to_key diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 73414dbf69..7a734f6712 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -49,6 +49,7 @@ from synapse.types import ( DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, PersistedEventPosition, Requester, RoomStreamToken, @@ -493,8 +494,7 @@ class SlidingSyncHandler: # Assemble sliding window lists lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} - # Keep track of the rooms that we're going to display and need to fetch more - # info about + # Keep track of the rooms that we can display and need to fetch more info about relevant_room_map: Dict[str, RoomSyncConfig] = {} if has_lists and sync_config.lists is not None: sync_room_map = await self.filter_rooms_relevant_for_sync( @@ -622,6 +622,8 @@ class SlidingSyncHandler: # Filter out rooms that haven't received updates and we've sent down # previously. + # Keep track of the rooms that we're going to display and need to fetch more info about + relevant_rooms_to_send_map = relevant_room_map if from_token: rooms_should_send = set() @@ -659,7 +661,7 @@ class SlidingSyncHandler: relevant_room_map.keys(), from_token.stream_token.room_key ) rooms_should_send.update(rooms_that_have_updates) - relevant_room_map = { + relevant_rooms_to_send_map = { room_id: room_sync_config for room_id, room_sync_config in relevant_room_map.items() if room_id in rooms_should_send @@ -671,7 +673,7 @@ class SlidingSyncHandler: room_sync_result = await self.get_room_sync_data( sync_config=sync_config, room_id=room_id, - room_sync_config=relevant_room_map[room_id], + room_sync_config=relevant_rooms_to_send_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ room_id ], @@ -683,13 +685,20 @@ class SlidingSyncHandler: if room_sync_result or not from_token: rooms[room_id] = room_sync_result - if relevant_room_map: + if relevant_rooms_to_send_map: with start_active_span("sliding_sync.generate_room_entries"): - await concurrently_execute(handle_room, relevant_room_map, 10) + await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10) extensions = await self.get_extensions_response( sync_config=sync_config, - lists=lists, + actual_lists=lists, + # We're purposely using `relevant_room_map` instead of + # `relevant_rooms_to_send_map` here. This needs to be all room_ids we could + # send regardless of whether they have an event update or not. The + # extensions care about more than just normal events in the rooms (like + # account data, read receipts, typing indicators, to-device messages, etc). 
+                actual_room_ids=set(relevant_room_map.keys()),
+                actual_room_response_map=rooms,
                 from_token=from_token,
                 to_token=to_token,
             )
@@ -698,7 +707,7 @@ class SlidingSyncHandler:
             connection_position = await self.connection_store.record_rooms(
                 sync_config=sync_config,
                 from_token=from_token,
-                sent_room_ids=relevant_room_map.keys(),
+                sent_room_ids=relevant_rooms_to_send_map.keys(),
                 # TODO: We need to calculate which rooms have had updates since the `from_token` but were not included in the `sent_room_ids`
                 unsent_room_ids=[],
             )
@@ -1902,7 +1911,9 @@ class SlidingSyncHandler:
     async def get_extensions_response(
         self,
         sync_config: SlidingSyncConfig,
-        lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
         to_token: StreamToken,
         from_token: Optional[SlidingSyncStreamToken],
     ) -> SlidingSyncResult.Extensions:
@@ -1910,7 +1921,11 @@ class SlidingSyncHandler:
 
         Args:
             sync_config: Sync configuration
-            lists: Sliding window API. A map of list key to list results.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
             to_token: The point in the stream to sync up to.
             from_token: The point in the stream to sync from.
         """
@@ -1939,18 +1954,103 @@ class SlidingSyncHandler:
         if sync_config.extensions.account_data is not None:
             account_data_response = await self.get_account_data_extension_response(
                 sync_config=sync_config,
-                lists=lists,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
                 account_data_request=sync_config.extensions.account_data,
                 to_token=to_token,
                 from_token=from_token,
             )
 
+        receipts_response = None
+        if sync_config.extensions.receipts is not None:
+            receipts_response = await self.get_receipts_extension_response(
+                sync_config=sync_config,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                actual_room_response_map=actual_room_response_map,
+                receipts_request=sync_config.extensions.receipts,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
         return SlidingSyncResult.Extensions(
             to_device=to_device_response,
             e2ee=e2ee_response,
             account_data=account_data_response,
+            receipts=receipts_response,
         )
 
+    def find_relevant_room_ids_for_extension(
+        self,
+        requested_lists: Optional[List[str]],
+        requested_room_ids: Optional[List[str]],
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+    ) -> Set[str]:
+        """
+        Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only
+        return results for rooms in the Sliding Sync response. This matches up the
+        requested rooms/lists with the actual lists/rooms in the Sliding Sync response.
+
+        {"lists": []}  // Do not process any lists.
+        {"lists": ["rooms", "dms"]}  // Process only a subset of lists.
+        {"lists": ["*"]}  // Process all lists defined in the Sliding Window API. (This is the default.)
+
+        {"rooms": []}  // Do not process any specific rooms.
+        {"rooms": ["!a:b", "!c:d"]}  // Process only a subset of room subscriptions.
+        {"rooms": ["*"]}  // Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+
+        Args:
+            requested_lists: The `lists` from the extension request.
+            requested_room_ids: The `rooms` from the extension request.
+            actual_lists: The actual lists from the Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+        """
+
+        # We only want to include results for rooms that are already in the Sliding
+        # Sync response AND that were requested in the extension request.
+        relevant_room_ids: Set[str] = set()
+
+        # See which rooms from the room subscriptions the extension should cover
+        if requested_room_ids is not None:
+            for room_id in requested_room_ids:
+                # A wildcard means we process all rooms from the room subscriptions
+                if room_id == "*":
+                    relevant_room_ids.update(actual_room_ids)
+                    break
+
+                if room_id in actual_room_ids:
+                    relevant_room_ids.add(room_id)
+
+        # See which rooms from the sliding window lists the extension should cover
+        if requested_lists is not None:
+            for list_key in requested_lists:
+                # Just some typing because we share the variable name in multiple places
+                actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None
+
+                # A wildcard means we process rooms from all lists
+                if list_key == "*":
+                    for actual_list in actual_lists.values():
+                        # We only expect a single SYNC operation for any list
+                        assert len(actual_list.ops) == 1
+                        sync_op = actual_list.ops[0]
+                        assert sync_op.op == OperationType.SYNC
+
+                        relevant_room_ids.update(sync_op.room_ids)
+
+                    break
+
+                actual_list = actual_lists.get(list_key)
+                if actual_list is not None:
+                    # We only expect a single SYNC operation for any list
+                    assert len(actual_list.ops) == 1
+                    sync_op = actual_list.ops[0]
+                    assert sync_op.op == OperationType.SYNC
+
+                    relevant_room_ids.update(sync_op.room_ids)
+
+        return relevant_room_ids
+
     @trace
     async def get_to_device_extension_response(
         self,
@@ -2081,7 +2181,8 @@ class SlidingSyncHandler:
     async def get_account_data_extension_response(
         self,
         sync_config: SlidingSyncConfig,
-        lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
         account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
         to_token: StreamToken,
         from_token: Optional[SlidingSyncStreamToken],
@@ -2090,7 +2191,9 @@ class SlidingSyncHandler:
 
         Args:
             sync_config: Sync configuration
-            lists: Sliding window API. A map of list key to list results.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
             account_data_request: The account_data extension from the request
             to_token: The point in the stream to sync up to.
             from_token: The point in the stream to sync from.
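As a standalone illustration of the matching rules documented in
`find_relevant_room_ids_for_extension` above, here is a simplified sketch in
which each sliding window list has already been reduced to its plain set of
room IDs (an assumption for brevity; the real helper extracts them from the
single SYNC op of each list):

    from typing import Dict, List, Optional, Set

    def relevant_rooms(
        requested_lists: Optional[List[str]],
        requested_room_ids: Optional[List[str]],
        actual_lists: Dict[str, Set[str]],
        actual_room_ids: Set[str],
    ) -> Set[str]:
        relevant: Set[str] = set()
        if requested_room_ids is not None:
            if "*" in requested_room_ids:
                # Wildcard: every room subscription in the response
                relevant |= actual_room_ids
            else:
                # Only rooms that were requested AND are in the response
                relevant |= set(requested_room_ids) & actual_room_ids
        if requested_lists is not None:
            if "*" in requested_lists:
                # Wildcard: every sliding window list in the response
                for rooms in actual_lists.values():
                    relevant |= rooms
            else:
                # Only lists that were requested AND are in the response
                for list_key in requested_lists:
                    relevant |= actual_lists.get(list_key, set())
        return relevant

    # {"lists": ["foo-list"], "rooms": ["!a:b"]} only yields rooms actually
    # present in the response's "foo-list" or room subscriptions:
    assert relevant_rooms(
        ["foo-list"], ["!a:b"], {"foo-list": {"!x:y"}}, {"!a:b"}
    ) == {"!a:b", "!x:y"}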
@@ -2103,6 +2206,7 @@ class SlidingSyncHandler: global_account_data_map: Mapping[str, JsonMapping] = {} if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` global_account_data_map = ( await self.store.get_updated_global_account_data_for_user( user_id, from_token.stream_token.account_data_key @@ -2114,76 +2218,40 @@ class SlidingSyncHandler: ) if have_push_rules_changed: global_account_data_map = dict(global_account_data_map) + # TODO: This should take into account the `from_token` and `to_token` global_account_data_map[AccountDataTypes.PUSH_RULES] = ( await self.push_rules_handler.push_rules_for_user(sync_config.user) ) else: + # TODO: This should take into account the `to_token` all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) global_account_data_map = dict(all_global_account_data) + # TODO: This should take into account the `to_token` global_account_data_map[AccountDataTypes.PUSH_RULES] = ( await self.push_rules_handler.push_rules_for_user(sync_config.user) ) - # We only want to include account data for rooms that are already in the sliding - # sync response AND that were requested in the account data request. - relevant_room_ids: Set[str] = set() - - # See what rooms from the room subscriptions we should get account data for - if ( - account_data_request.rooms is not None - and sync_config.room_subscriptions is not None - ): - actual_room_ids = sync_config.room_subscriptions.keys() - - for room_id in account_data_request.rooms: - # A wildcard means we process all rooms from the room subscriptions - if room_id == "*": - relevant_room_ids.update(sync_config.room_subscriptions.keys()) - break - - if room_id in actual_room_ids: - relevant_room_ids.add(room_id) - - # See what rooms from the sliding window lists we should get account data for - if account_data_request.lists is not None: - for list_key in account_data_request.lists: - # Just some typing because we share the variable name in multiple places - actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None - - # A wildcard means we process rooms from all lists - if list_key == "*": - for actual_list in lists.values(): - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - - break - - actual_list = lists.get(list_key) - if actual_list is not None: - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - # Fetch room account data account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {} + relevant_room_ids = self.find_relevant_room_ids_for_extension( + requested_lists=account_data_request.lists, + requested_room_ids=account_data_request.rooms, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + ) if len(relevant_room_ids) > 0: if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` account_data_by_room_map = ( await self.store.get_updated_room_account_data_for_user( user_id, from_token.stream_token.account_data_key ) ) else: + # TODO: This should take into account the `to_token` account_data_by_room_map = ( await self.store.get_room_account_data_for_user(user_id) ) @@ -2200,6 +2268,86 @@ class SlidingSyncHandler: account_data_by_room_map=account_data_by_room_map, ) + 
async def get_receipts_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]:
+        """Handle Receipts extension (MSC3960)
+
+        Args:
+            sync_config: Sync configuration
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            receipts_request: The receipts extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not receipts_request.enabled:
+            return None
+
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=receipts_request.lists,
+            requested_room_ids=receipts_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_receipt_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            receipt_source = self.event_sources.sources.receipt
+            receipts, _ = await receipt_source.get_new_events(
+                user=sync_config.user,
+                from_key=(
+                    from_token.stream_token.receipt_key
+                    if from_token
+                    else MultiWriterStreamToken(stream=0)
+                ),
+                to_key=to_token.receipt_key,
+                # This is a dummy value and isn't used in the function
+                limit=0,
+                room_ids=relevant_room_ids,
+                is_guest=False,
+            )
+
+            for receipt in receipts:
+                # These fields should exist for every receipt
+                room_id = receipt["room_id"]
+                type = receipt["type"]
+                content = receipt["content"]
+
+                room_result = actual_room_response_map.get(room_id)
+                if room_result is not None:
+                    if room_result.initial:
+                        # TODO: In the future, it would be good to fetch fewer receipts
+                        # out of the database in the first place but we would need to
+                        # add a new `event_id` index to `receipts_linearized`.
+                        relevant_event_ids = [
+                            event.event_id for event in room_result.timeline_events
+                        ]
+
+                        assert isinstance(content, dict)
+                        content = {
+                            event_id: content_value
+                            for event_id, content_value in content.items()
+                            if event_id in relevant_event_ids
+                        }
+
+                room_id_to_receipt_map[room_id] = {"type": type, "content": content}
+
+        return SlidingSyncResult.Extensions.ReceiptsExtension(
+            room_id_to_receipt_map=room_id_to_receipt_map,
+        )
+
 
 class HaveSentRoomFlag(Enum):
     """Flag for whether we have sent the room down a sliding sync connection.
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index ccfce6bd53..c607d08de5 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -1150,6 +1150,12 @@ class SlidingSyncRestServlet(RestServlet):
                 },
             }
 
+        if extensions.receipts is not None:
+            serialized_extensions["receipts"] = {
+                # Same as the top-level `account_data.events` field in Sync v2.
+ "rooms": extensions.receipts.room_id_to_receipt_map, + } + return serialized_extensions diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index f26cc0e903..2f7e92665c 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -152,7 +152,7 @@ class SlidingSyncResult: Attributes: next_pos: The next position token in the sliding window to request (next_batch). lists: Sliding window API. A map of list key to list results. - rooms: Room subscription API. A map of room ID to room subscription to room results. + rooms: Room subscription API. A map of room ID to room results. extensions: Extensions API. A map of extension key to extension results. """ @@ -361,12 +361,28 @@ class SlidingSyncResult: self.global_account_data_map or self.account_data_by_room_map ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ReceiptsExtension: + """The Receipts extension (MSC3960) + + Attributes: + room_id_to_receipt_map: Mapping from room_id to `m.receipt` event (type, content) + """ + + room_id_to_receipt_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_receipt_map) + to_device: Optional[ToDeviceExtension] = None e2ee: Optional[E2eeExtension] = None account_data: Optional[AccountDataExtension] = None + receipts: Optional[ReceiptsExtension] = None def __bool__(self) -> bool: - return bool(self.to_device or self.e2ee or self.account_data) + return bool( + self.to_device or self.e2ee or self.account_data or self.receipts + ) next_pos: SlidingSyncStreamToken lists: Dict[str, SlidingWindowList] diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index dfe3b1e0f7..4e632e4492 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -342,9 +342,27 @@ class SlidingSyncBody(RequestBodyModel): # Process all room subscriptions defined in the Room Subscription API. (This is the default.) rooms: Optional[List[StrictStr]] = ["*"] + class ReceiptsExtension(RequestBodyModel): + """The Receipts extension (MSC3960) + + Attributes: + enabled + lists: List of list keys (from the Sliding Window API) to apply this + extension to. + rooms: List of room IDs (from the Room Subscription API) to apply this + extension to. + """ + + enabled: Optional[StrictBool] = False + # Process all lists defined in the Sliding Window API. (This is the default.) + lists: Optional[List[StrictStr]] = ["*"] + # Process all room subscriptions defined in the Room Subscription API. (This is the default.) 
+ rooms: Optional[List[StrictStr]] = ["*"] + to_device: Optional[ToDeviceExtension] = None e2ee: Optional[E2eeExtension] = None account_data: Optional[AccountDataExtension] = None + receipts: Optional[ReceiptsExtension] = None conn_id: Optional[str] diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 5abf1041be..1184adde70 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -30,6 +30,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( AccountDataTypes, + EduTypes, EventContentFields, EventTypes, HistoryVisibility, @@ -1369,12 +1370,14 @@ class SlidingSyncTestCase(SlidingSyncBase): room.register_servlets, sync.register_servlets, devices.register_servlets, + receipts.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() def _assertRequiredStateIncludes( self, @@ -4454,6 +4457,225 @@ class SlidingSyncTestCase(SlidingSyncBase): # `world_readable` but currently we don't support this. self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) + # Any extensions that use `lists`/`rooms` should be tested here + @parameterized.expand([("account_data",), ("receipts",)]) + def test_extensions_lists_rooms_relevant_rooms(self, extension_name: str) -> None: + """ + With various extensions, test out requesting different variations of + `lists`/`rooms`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create some rooms + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok) + + room_id_to_human_name_map = { + room_id1: "room1", + room_id2: "room2", + room_id3: "room3", + room_id4: "room4", + room_id5: "room5", + } + + for room_id in room_id_to_human_name_map.keys(): + if extension_name == "account_data": + # Add some account data to each room + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + elif extension_name == "receipts": + event_response = self.helper.send( + room_id, body="new event", tok=user1_tok + ) + # Read last event + channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + else: + raise AssertionError(f"Unknown extension name: {extension_name}") + + main_sync_body = { + "lists": { + # We expect this list range to include room5 and room4 + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + # We expect this list range to include room5, room4, room3 + "bar-list": { + "ranges": [[0, 2]], + "required_state": [], + "timeline_limit": 0, + }, + }, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + } + + # Mix lists and rooms + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": 
True, + "lists": ["foo-list", "non-existent-list"], + "rooms": [room_id1, room_id2, "!non-existent-room"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Requested via `rooms` and a room subscription exists + # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions) + # room3: ❌ Not requested + # room4: ✅ Shows up because requested via `lists` and list exists in the response + # room5: ✅ Shows up because requested via `lists` and list exists in the response + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room1", "room4", "room5"}, + exact=True, + ) + + # Try wildcards (this is the default) + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + # "lists": ["*"], + # "rooms": ["*"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room1", "room3", "room4", "room5"}, + exact=True, + ) + + # Empty list will return nothing + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": [], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ❌ Not requested + # room4: ❌ Not requested + # room5: ❌ Not requested + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + set(), + exact=True, + ) + + # Try wildcard and none + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": ["*"], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room3", "room4", "room5"}, + exact=True, + ) + + # Try requesting a room that is only in a list + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": [], + "rooms": [room_id5], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ❌ Not requested + # room4: ❌ Not requested + # room5: ✅ Requested via `rooms` and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room5"}, + exact=True, + ) + def test_rooms_required_state_incremental_sync_LIVE(self) -> None: """Test that we only get state updates in incremental sync 
for rooms we've already seen (LIVE). @@ -5928,268 +6150,6 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): exact=True, ) - def test_room_account_data_relevant_rooms(self) -> None: - """ - Test out different variations of `lists`/`rooms` we are requesting account data for. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a room and add some room account data - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id1, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id2, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id3, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id4, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id5, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - room_id_to_human_name_map = { - room_id1: "room1", - room_id2: "room2", - room_id3: "room3", - room_id4: "room4", - room_id5: "room5", - } - - # Mix lists and rooms - sync_body = { - "lists": { - # We expect this list range to include room5 and room4 - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - }, - # We expect this list range to include room5, room4, room3 - "bar-list": { - "ranges": [[0, 2]], - "required_state": [], - "timeline_limit": 0, - }, - }, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - "lists": ["foo-list", "non-existent-list"], - "rooms": [room_id1, room_id2, "!non-existent-room"], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ✅ Requested via `rooms` and a room subscription exists - # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions) - # room3: ❌ Not requested - # room4: ✅ Shows up because requested via `lists` and list exists in the response - # room5: ✅ Shows up because requested via `lists` and list exists in the response - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"]["account_data"] - .get("rooms") - .keys() - }, - {"room1", "room4", "room5"}, - exact=True, - ) - - # Try wildcards (this is the default) - sync_body = { - "lists": { - # We expect this list range to include room5 and room4 - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - }, - # 
We expect this list range to include room5, room4, room3 - "bar-list": { - "ranges": [[0, 2]], - "required_state": [], - "timeline_limit": 0, - }, - }, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - # "lists": ["*"], - # "rooms": ["*"], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions - # room2: ❌ Not requested - # room3: ✅ Shows up because of default `lists` wildcard and is in a list - # room4: ✅ Shows up because of default `lists` wildcard and is in a list - # room5: ✅ Shows up because of default `lists` wildcard and is in a list - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"]["account_data"] - .get("rooms") - .keys() - }, - {"room1", "room3", "room4", "room5"}, - exact=True, - ) - - # Empty list will return nothing - sync_body = { - "lists": { - # We expect this list range to include room5 and room4 - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - }, - # We expect this list range to include room5, room4, room3 - "bar-list": { - "ranges": [[0, 2]], - "required_state": [], - "timeline_limit": 0, - }, - }, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - "lists": [], - "rooms": [], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ❌ Not requested - # room2: ❌ Not requested - # room3: ❌ Not requested - # room4: ❌ Not requested - # room5: ❌ Not requested - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"]["account_data"] - .get("rooms") - .keys() - }, - set(), - exact=True, - ) - - # Try wildcard and none - sync_body = { - "lists": { - # We expect this list range to include room5 and room4 - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - }, - # We expect this list range to include room5, room4, room3 - "bar-list": { - "ranges": [[0, 2]], - "required_state": [], - "timeline_limit": 0, - }, - }, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - "lists": ["*"], - "rooms": [], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ❌ Not requested - # room2: ❌ Not requested - # room3: ✅ Shows up because of default `lists` wildcard and is in a list - # room4: ✅ Shows up because of default `lists` wildcard and is in a list - # room5: ✅ Shows up because of default `lists` wildcard and is in a list - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"]["account_data"] - .get("rooms") - .keys() - }, - {"room3", "room4", "room5"}, - exact=True, - ) - def test_wait_for_new_data(self) -> None: """ Test to make sure that the Sliding Sync request waits for new data to arrive. 
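The receipts extension tests added in the next hunk exercise the initial-sync
behaviour implemented in `get_receipts_extension_response` above: for an
`initial` room, the receipt content is trimmed down to the event IDs that made
it into the (possibly limited) timeline. A toy sketch of that filtering, using
hypothetical event IDs, users, and timestamps in place of database values:

    # timeline_limit=1 keeps only the latest event in the timeline
    timeline_event_ids = {"$event2"}
    receipt_content = {
        "$event1": {"m.read": {"@user3:test": {"ts": 1}}},
        "$event2": {"m.read": {"@user1:test": {"ts": 2}, "@user2:test": {"ts": 3}}},
    }
    filtered = {
        event_id: per_event
        for event_id, per_event in receipt_content.items()
        if event_id in timeline_event_ids
    }
    # user3's receipt on the older, non-timeline event is dropped
    assert "$event1" not in filtered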
@@ -6307,3 +6267,654 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
         self.assertIsNotNone(
             channel.json_body["extensions"]["account_data"].get("rooms")
         )
+
+
+class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
+    """Tests for the receipts sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the receipts extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an incremental sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_receipts_initial_sync_with_timeline(self) -> None:
+        """
+        On initial sync, we only return receipts for events in a given room's timeline.
+
+        We also make sure that we only return receipts for rooms that we request and are
+        already being returned in the Sliding Sync response.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+
+        # Create a room
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        room1_event_response1 = self.helper.send(
+            room_id1, body="new event1", tok=user2_tok
+        )
+        room1_event_response2 = self.helper.send(
+            room_id1, body="new event2", tok=user2_tok
+        )
+        # User1 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User3 reads the first event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
+            {},
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User4 privately reads the last event (make sure this doesn't leak to the other users)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user4_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create another room
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        self.helper.join(room_id2, user3_id, tok=user3_tok)
+        self.helper.join(room_id2, user4_id, tok=user4_tok)
+        room2_event_response1 = self.helper.send(
+            room_id2, body="new event2", tok=user2_tok
+        )
+        # User1 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User4 privately reads the last event (make sure this doesn't leak to the other users)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user4_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make an initial Sliding Sync request with the receipts extension enabled
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    # On initial sync, we only have receipts for events in the timeline
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Only the latest event in the room is in the timeline
because the `timeline_limit` is 1 + self.assertIncludes( + { + event["event_id"] + for event in response_body["rooms"][room_id1].get("timeline", []) + }, + {room1_event_response2["event_id"]}, + exact=True, + message=str(response_body["rooms"][room_id1]), + ) + + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # Sanity check that it's the correct ephemeral event type + self.assertEqual( + response_body["extensions"]["receipts"]["rooms"][room_id1]["type"], + EduTypes.RECEIPT, + ) + # We can see user1 and user2 read receipts + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response2["event_id"] + ][ReceiptTypes.READ].keys(), + {user1_id, user2_id}, + exact=True, + ) + # User1 did not have a private read receipt and we shouldn't leak others' + # private read receipts + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response2["event_id"] + ] + .get(ReceiptTypes.READ_PRIVATE, {}) + .keys(), + set(), + exact=True, + ) + + # We shouldn't see receipts for event2 since it wasn't in the timeline and this is an initial sync + self.assertIsNone( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get( + room1_event_response1["event_id"] + ) + ) + + def test_receipts_incremental_sync(self) -> None: + """ + On incremental sync, we return all receipts in the token range for a given room + but only for rooms that we request and are being returned in the Sliding Sync + response. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + # Create room1 + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + room1_event_response1 = self.helper.send( + room_id1, body="new event2", tok=user2_tok + ) + # User2 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room2 + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + room2_event_response1 = self.helper.send( + room_id2, body="new event2", tok=user2_tok + ) + # User1 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room3 + room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id3, user1_id, tok=user1_tok) + self.helper.join(room_id3, user3_id, tok=user3_tok) + room3_event_response1 = self.helper.send( + room_id3, body="new event", tok=user2_tok + ) + + # Create room4 + room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id4, user1_id, tok=user1_tok) + self.helper.join(room_id4, user3_id, tok=user3_tok) + event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok) + # User1 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + room_id3: { + "required_state": [], + "timeline_limit": 0, + }, + room_id4: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id1, room_id2, room_id3, room_id4], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some more read receipts after the `from_token` + # + # User1 reads room1 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User1 privately reads room2 + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User3 reads room3 + channel = self.make_request( + "POST", + f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}", + {}, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # No activity for room4 after the `from_token` + + # Make an incremental Sliding Sync request with the receipts 
+        # extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Even though we requested room2, we only expect rooms to show up if they are
+        # already in the Sliding Sync response. room4 doesn't show up because there is
+        # no activity after the `from_token`.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1, room_id3},
+            exact=True,
+        )
+
+        # Check room1:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user1 has read something in room1 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id1, response_body["rooms"])
+
+        # Check room3:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user3 has read something in room3 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user3_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id3, response_body["rooms"])
+
+    def test_receipts_incremental_sync_all_live_receipts(self) -> None:
+        """
+        On incremental sync, we return all receipts in the token range for a given room
+        even if they are not in the timeline.
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create room1 + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + # The timeline will only include event2 + "timeline_limit": 1, + }, + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id1], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + room1_event_response1 = self.helper.send( + room_id1, body="new event1", tok=user2_tok + ) + room1_event_response2 = self.helper.send( + room_id1, body="new event2", tok=user2_tok + ) + + # User1 reads event1 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 reads event2 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make an incremental Sliding Sync request with the receipts extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should see room1 because it has receipts in the token range + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # Sanity check that it's the correct ephemeral event type + self.assertEqual( + response_body["extensions"]["receipts"]["rooms"][room_id1]["type"], + EduTypes.RECEIPT, + ) + # We should see all receipts in the token range regardless of whether the events + # are in the timeline + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response1["event_id"] + ][ReceiptTypes.READ].keys(), + {user1_id}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response2["event_id"] + ][ReceiptTypes.READ].keys(), + {user2_id}, + exact=True, + ) + # Only the latest event in the timeline because the `timeline_limit` is 1 + self.assertIncludes( + { + event["event_id"] + for event in response_body["rooms"][room_id1].get("timeline", []) + }, + {room1_event_response2["event_id"]}, + exact=True, + message=str(response_body["rooms"][room_id1]), + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        event_response = self.helper.send(room_id, body="new event", tok=user2_tok)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the receipts to trigger new results
+        receipt_channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body)
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the new receipt
+        self.assertIncludes(
+            channel.json_body.get("extensions", {})
+            .get("receipts", {})
+            .get("rooms", {})
+            .keys(),
+            {room_id},
+            exact=True,
+            message=str(channel.json_body),
+        )
+        self.assertIncludes(
+            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
+                event_response["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
+                event_response["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data from the receipts extension doesn't trigger a false-positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
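
(An illustrative aside: the guard this test exercises can be sketched as a tiny standalone program. This is not Synapse's actual class; the real check lives in `SlidingSyncResult.__bool__`, and the names below are made up for illustration.)

    # Minimal sketch: an empty receipts extension must be falsy so that an
    # unrelated notifier wake-up (e.g. on the account-data stream) does not
    # end the long-poll early with an empty response.
    from dataclasses import dataclass, field
    from typing import Any, Dict

    @dataclass
    class FakeReceiptsExtension:
        rooms: Dict[str, Any] = field(default_factory=dict)

        def __bool__(self) -> bool:
            return bool(self.rooms)

    assert not FakeReceiptsExtension()  # no receipts -> keep waiting
    assert FakeReceiptsExtension(rooms={"!room:server": {}})  # data -> respond
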
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertIncludes( + channel.json_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) From 46de0ee16be8731f0ed68654edc75aced1510b19 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Jul 2024 13:20:29 -0500 Subject: [PATCH 088/332] Sliding Sync: Update filters to be robust against remote invite rooms (#17450) Update `filters.is_encrypted` and `filters.types`/`filters.not_types` to be robust when dealing with remote invite rooms in Sliding Sync. Part of [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync Follow-up to https://github.com/element-hq/synapse/pull/17434 We now take into account current state, fallback to stripped state for invite/knock rooms, then historical state. If we can't determine the info needed to filter a room (either from state or stripped state), it is filtered out. --- changelog.d/17450.bugfix | 1 + synapse/api/constants.py | 8 + synapse/events/__init__.py | 19 + synapse/events/utils.py | 29 +- synapse/handlers/sliding_sync.py | 385 +++++++++- synapse/handlers/stats.py | 4 +- synapse/storage/_base.py | 4 + synapse/storage/databases/main/cache.py | 22 +- synapse/storage/databases/main/state.py | 215 +++++- tests/handlers/test_sliding_sync.py | 890 ++++++++++++++++++++++-- tests/rest/client/test_sync.py | 145 ++++ 11 files changed, 1605 insertions(+), 117 deletions(-) create mode 100644 changelog.d/17450.bugfix diff --git a/changelog.d/17450.bugfix b/changelog.d/17450.bugfix new file mode 100644 index 0000000000..01a521da38 --- /dev/null +++ b/changelog.d/17450.bugfix @@ -0,0 +1 @@ +Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 85001d9676..7dcb1e01fd 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -225,6 +225,11 @@ class EventContentFields: # This is deprecated in MSC2175. ROOM_CREATOR: Final = "creator" + # The version of the room for `m.room.create` events. + ROOM_VERSION: Final = "room_version" + + ROOM_NAME: Final = "name" + # Used in m.room.guest_access events. GUEST_ACCESS: Final = "guest_access" @@ -237,6 +242,9 @@ class EventContentFields: # an unspecced field added to to-device messages to identify them uniquely-ish TO_DEVICE_MSGID: Final = "org.matrix.msgid" + # `m.room.encryption`` algorithm field + ENCRYPTION_ALGORITHM: Final = "algorithm" + class EventUnsignedContentFields: """Fields found inside the 'unsigned' data on events""" diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 36e0f47e51..2e56b671f0 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -554,3 +554,22 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]: aggregation_key = None return _EventRelation(parent_id, rel_type, aggregation_key) + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class StrippedStateEvent: + """ + A stripped down state event. Usually used for remote invite/knocks so the user can + make an informed decision on whether they want to join. 
+ + Attributes: + type: Event `type` + state_key: Event `state_key` + sender: Event `sender` + content: Event `content` + """ + + type: str + state_key: str + sender: str + content: Dict[str, Any] diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f937fd4698..54f94add4d 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersion from synapse.types import JsonDict, Requester -from . import EventBase, make_event_from_dict +from . import EventBase, StrippedStateEvent, make_event_from_dict if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations @@ -854,3 +854,30 @@ def strip_event(event: EventBase) -> JsonDict: "content": event.content, "sender": event.sender, } + + +def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]: + """ + Given a raw value from an event's `unsigned` field, attempt to parse it into a + `StrippedStateEvent`. + """ + if isinstance(raw_stripped_event, dict): + # All of these fields are required + type = raw_stripped_event.get("type") + state_key = raw_stripped_event.get("state_key") + sender = raw_stripped_event.get("sender") + content = raw_stripped_event.get("content") + if ( + isinstance(type, str) + and isinstance(state_key, str) + and isinstance(sender, str) + and isinstance(content, dict) + ): + return StrippedStateEvent( + type=type, + state_key=state_key, + sender=sender, + content=content, + ) + + return None diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 7a734f6712..530e7b7b4e 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -17,6 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # +import enum import logging from enum import Enum from itertools import chain @@ -26,23 +27,35 @@ from typing import ( Dict, Final, List, + Literal, Mapping, Optional, Sequence, Set, Tuple, + Union, ) import attr from immutabledict import immutabledict from typing_extensions import assert_never -from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membership -from synapse.events import EventBase -from synapse.events.utils import strip_event +from synapse.api.constants import ( + AccountDataTypes, + Direction, + EventContentFields, + EventTypes, + Membership, +) +from synapse.events import EventBase, StrippedStateEvent +from synapse.events.utils import parse_stripped_state_event, strip_event from synapse.handlers.relations import BundledAggregations from synapse.logging.opentracing import log_kv, start_active_span, tag_args, trace from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.databases.main.state import ( + ROOM_UNKNOWN_SENTINEL, + Sentinel as StateSentinel, +) from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary from synapse.types import ( @@ -50,6 +63,7 @@ from synapse.types import ( JsonDict, JsonMapping, MultiWriterStreamToken, + MutableStateMap, PersistedEventPosition, Requester, RoomStreamToken, @@ -71,6 +85,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup and subsequent type narrowing. + UNSET_SENTINEL = object() + + # The event types that clients should consider as new activity. 
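
(A brief usage sketch of `parse_stripped_state_event` as added above; the inputs are made up for illustration. Anything that is not a dict carrying string `type`/`state_key`/`sender` fields and a dict `content` comes back as `None`, since remote-supplied `unsigned` data cannot be trusted.)

    from synapse.events.utils import parse_stripped_state_event

    good = parse_stripped_state_event(
        {
            "type": "m.room.encryption",
            "state_key": "",
            "sender": "@inviter:remote_server",
            "content": {"algorithm": "m.megolm.v1.aes-sha2"},
        }
    )
    assert good is not None
    assert good.content["algorithm"] == "m.megolm.v1.aes-sha2"

    # Missing fields or wrong shapes are rejected rather than raising.
    assert parse_stripped_state_event({"type": "m.room.create"}) is None
    assert parse_stripped_state_event(["not", "a", "dict"]) is None
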
DEFAULT_BUMP_EVENT_TYPES = { EventTypes.Create, @@ -1172,6 +1192,265 @@ class SlidingSyncHandler: # return None + async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( + self, + room_ids: StrCollection, + sync_room_map: Dict[str, _RoomMembershipForUser], + ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: + """ + Fetch stripped state for a list of room IDs. Stripped state is only + applicable to invite/knock rooms. Other rooms will have `None` as their + stripped state. + + For invite rooms, we pull from `unsigned.invite_room_state`. + For knock rooms, we pull from `unsigned.knock_room_state`. + + Args: + room_ids: Room IDs to fetch stripped state for + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + + Returns: + Mapping from room_id to mapping of (type, state_key) to stripped state + event. + """ + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + + # Fetch what we haven't before + room_ids_to_fetch = [ + room_id + for room_id in room_ids + if room_id not in room_id_to_stripped_state_map + ] + + # Gather a list of event IDs we can grab stripped state from + invite_or_knock_event_ids: List[str] = [] + for room_id in room_ids_to_fetch: + if sync_room_map[room_id].membership in ( + Membership.INVITE, + Membership.KNOCK, + ): + event_id = sync_room_map[room_id].event_id + # If this is an invite/knock then there should be an event_id + assert event_id is not None + invite_or_knock_event_ids.append(event_id) + else: + room_id_to_stripped_state_map[room_id] = None + + invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids) + for invite_or_knock_event in invite_or_knock_events.values(): + room_id = invite_or_knock_event.room_id + membership = invite_or_knock_event.membership + + raw_stripped_state_events = None + if membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + else: + raise AssertionError( + f"Unexpected membership {membership} (this is a problem with Synapse itself)" + ) + + stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None + # Scrutinize unsigned things. 
+            # `raw_stripped_state_events` should be a list
+            # of stripped events
+            if raw_stripped_state_events is not None:
+                stripped_state_map = {}
+                if isinstance(raw_stripped_state_events, list):
+                    for raw_stripped_event in raw_stripped_state_events:
+                        stripped_state_event = parse_stripped_state_event(
+                            raw_stripped_event
+                        )
+                        if stripped_state_event is not None:
+                            stripped_state_map[
+                                (
+                                    stripped_state_event.type,
+                                    stripped_state_event.state_key,
+                                )
+                            ] = stripped_state_event
+
+            room_id_to_stripped_state_map[room_id] = stripped_state_map
+
+        return room_id_to_stripped_state_map
+
+    async def _bulk_get_partial_current_state_content_for_rooms(
+        self,
+        content_type: Literal[
+            # `content.type` from `EventTypes.Create`
+            "room_type",
+            # `content.algorithm` from `EventTypes.RoomEncryption`
+            "room_encryption",
+        ],
+        room_ids: Set[str],
+        sync_room_map: Dict[str, _RoomMembershipForUser],
+        to_token: StreamToken,
+        room_id_to_stripped_state_map: Dict[
+            str, Optional[StateMap[StrippedStateEvent]]
+        ],
+    ) -> Mapping[str, Union[Optional[str], StateSentinel]]:
+        """
+        Get the given state event content for a list of rooms. First we check the
+        current state of the room, then fall back to stripped state if available,
+        then historical state.
+
+        Args:
+            content_type: Which content to grab
+            room_ids: Room IDs to fetch the given content field for.
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            to_token: We filter based on the state of the room at this token
+            room_id_to_stripped_state_map: This does not need to be filled in before
+                calling this function. Mapping from room_id to mapping of (type, state_key)
+                to stripped state event. Modified in place when we fetch new rooms so we can
+                save work next time this function is called.
+
+        Returns:
+            A mapping from room ID to the state event content if the room has
+            the given state event (event_type, ""), otherwise `None`. Rooms unknown to
+            this server will return `ROOM_UNKNOWN_SENTINEL`.
+        """
+        room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {}
+
+        # As a bulk shortcut, use the current state if the server is participating in the
+        # room (meaning we have current state). Ideally, for leave/ban rooms, we would
+        # want the state at the time of the membership instead of current state to not
+        # leak anything but we consider the create/encryption stripped state events to
+        # not be a secret given they are often set at the start of the room and they are
+        # normally handed out on invite/knock.
+        #
+        # Be mindful to only use this for non-sensitive details. For example, even
+        # though the room name/avatar/topic are also stripped state, they seem a lot
+        # more sensitive to leak the current state value of.
+        #
+        # Since this function is cached, we need to make a mutable copy via
+        # `dict(...)`.
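
(For intuition, a self-contained sketch of the sentinel pattern relied on here, with illustrative names only: caching `None` for an unknown room would be indistinguishable from a known room whose value really is `None`, so unknown rooms are cached under a dedicated sentinel instead.)

    from typing import Dict, Optional, Union

    class _RoomUnknown:
        """Stands in for ROOM_UNKNOWN_SENTINEL in this sketch."""

    ROOM_UNKNOWN = _RoomUnknown()

    # Pretend backend: one space, one plain room; everything else unknown.
    _backend: Dict[str, Optional[str]] = {
        "!space:server": "m.space",
        "!plain:server": None,
    }
    _cache: Dict[str, Union[Optional[str], _RoomUnknown]] = {}

    def get_room_type_cached(room_id: str) -> Union[Optional[str], _RoomUnknown]:
        if room_id not in _cache:
            _cache[room_id] = _backend.get(room_id, ROOM_UNKNOWN)
        return _cache[room_id]

    assert get_room_type_cached("!plain:server") is None  # known, typeless room
    assert get_room_type_cached("!other:server") is ROOM_UNKNOWN  # unknown room
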
+ event_type = "" + event_content_field = "" + if content_type == "room_type": + event_type = EventTypes.Create + event_content_field = EventContentFields.ROOM_TYPE + room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids)) + elif content_type == "room_encryption": + event_type = EventTypes.RoomEncryption + event_content_field = EventContentFields.ENCRYPTION_ALGORITHM + room_id_to_content = dict( + await self.store.bulk_get_room_encryption(room_ids) + ) + else: + assert_never(content_type) + + room_ids_with_results = [ + room_id + for room_id, content_field in room_id_to_content.items() + if content_field is not ROOM_UNKNOWN_SENTINEL + ] + + # We might not have current room state for remote invite/knocks if we are + # the first person on our server to see the room. The best we can do is look + # in the optional stripped state from the invite/knock event. + room_ids_without_results = room_ids.difference( + chain( + room_ids_with_results, + [ + room_id + for room_id, stripped_state_map in room_id_to_stripped_state_map.items() + if stripped_state_map is not None + ], + ) + ) + room_id_to_stripped_state_map.update( + await self._bulk_get_stripped_state_for_rooms_from_sync_room_map( + room_ids_without_results, sync_room_map + ) + ) + + # Update our `room_id_to_content` map based on the stripped state + # (applies to invite/knock rooms) + rooms_ids_without_stripped_state: Set[str] = set() + for room_id in room_ids_without_results: + stripped_state_map = room_id_to_stripped_state_map.get( + room_id, Sentinel.UNSET_SENTINEL + ) + assert stripped_state_map is not Sentinel.UNSET_SENTINEL, ( + f"Stripped state left unset for room {room_id}. " + + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` " + + "with that room_id. (this is a problem with Synapse itself)" + ) + + # If there is some stripped state, we assume the remote server passed *all* + # of the potential stripped state events for the room. + if stripped_state_map is not None: + create_stripped_event = stripped_state_map.get((EventTypes.Create, "")) + stripped_event = stripped_state_map.get((event_type, "")) + # Sanity check that we at-least have the create event + if create_stripped_event is not None: + if stripped_event is not None: + room_id_to_content[room_id] = stripped_event.content.get( + event_content_field + ) + else: + # Didn't see the state event we're looking for in the stripped + # state so we can assume relevant content field is `None`. + room_id_to_content[room_id] = None + else: + rooms_ids_without_stripped_state.add(room_id) + + # Last resort, we might not have current room state for rooms that the + # server has left (no one local is in the room) but we can look at the + # historical state. + # + # Update our `room_id_to_content` map based on the state at the time of + # the membership event. + for room_id in rooms_ids_without_stripped_state: + # TODO: It would be nice to look this up in a bulk way (N+1 queries) + # + # TODO: `get_state_at(...)` doesn't take into account the "current state". 
+ room_state = await self.storage_controllers.state.get_state_at( + room_id=room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + sync_room_map[room_id].event_pos.to_room_stream_token(), + ), + state_filter=StateFilter.from_types( + [ + (EventTypes.Create, ""), + (event_type, ""), + ] + ), + # Partially-stated rooms should have all state events except for + # remote membership events so we don't need to wait at all because + # we only want the create event and some non-member event. + await_full_state=False, + ) + # We can use the create event as a canary to tell whether the server has + # seen the room before + create_event = room_state.get((EventTypes.Create, "")) + state_event = room_state.get((event_type, "")) + + if create_event is None: + # Skip for unknown rooms + continue + + if state_event is not None: + room_id_to_content[room_id] = state_event.content.get( + event_content_field + ) + else: + # Didn't see the state event we're looking for in the stripped + # state so we can assume relevant content field is `None`. + room_id_to_content[room_id] = None + + return room_id_to_content + @trace async def filter_rooms( self, @@ -1194,6 +1473,10 @@ class SlidingSyncHandler: A filtered dictionary of room IDs along with membership information in the room at the time of `to_token`. """ + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + filtered_room_id_set = set(sync_room_map.keys()) # Filter for Direct-Message (DM) rooms @@ -1213,31 +1496,34 @@ class SlidingSyncHandler: if not sync_room_map[room_id].is_dm } - if filters.spaces: + if filters.spaces is not None: raise NotImplementedError() # Filter for encrypted rooms if filters.is_encrypted is not None: + room_id_to_encryption = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_encryption", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) + ) + # Make a copy so we don't run into an error: `Set changed size during # iteration`, when we filter out and remove items for room_id in filtered_room_id_set.copy(): - state_at_to_token = await self.storage_controllers.state.get_state_at( - room_id, - to_token, - state_filter=StateFilter.from_types( - [(EventTypes.RoomEncryption, "")] - ), - # Partially-stated rooms should have all state events except for the - # membership events so we don't need to wait because we only care - # about retrieving the `EventTypes.RoomEncryption` state event here. - # Plus we don't want to block the whole sync waiting for this one - # room. - await_full_state=False, - ) - is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, "")) + encryption = room_id_to_encryption.get(room_id, ROOM_UNKNOWN_SENTINEL) + + # Just remove rooms if we can't determine their encryption status + if encryption is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue # If we're looking for encrypted rooms, filter out rooms that are not # encrypted and vice versa + is_encrypted = encryption is not None if (filters.is_encrypted and not is_encrypted) or ( not filters.is_encrypted and is_encrypted ): @@ -1263,15 +1549,26 @@ class SlidingSyncHandler: # provided in the list. `None` is a valid type for rooms which do not have a # room type. 
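
(Distilled for illustration only: the room-type filtering rule applied below is a three-way decision. `ROOM_UNKNOWN` here stands in for `ROOM_UNKNOWN_SENTINEL`; unknown rooms are dropped outright, otherwise the type is compared against the allow/deny lists.)

    from typing import Collection, Optional

    ROOM_UNKNOWN = object()  # stand-in for ROOM_UNKNOWN_SENTINEL

    def keep_room(
        room_type: object,
        room_types: Optional[Collection[Optional[str]]],
        not_room_types: Optional[Collection[Optional[str]]],
    ) -> bool:
        if room_type is ROOM_UNKNOWN:
            # Can't tell what the room is -> filter it out entirely.
            return False
        if room_types is not None and room_type not in room_types:
            return False
        if not_room_types is not None and room_type in not_room_types:
            return False
        return True

    assert keep_room(None, room_types=[None], not_room_types=None)  # plain room
    assert not keep_room(ROOM_UNKNOWN, room_types=[None], not_room_types=None)
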
if filters.room_types is not None or filters.not_room_types is not None: - room_to_type = await self.store.bulk_get_room_type( - { - room_id - for room_id in filtered_room_id_set - # We only know the room types for joined rooms - if sync_room_map[room_id].membership == Membership.JOIN - } + room_id_to_type = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_type", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) ) - for room_id, room_type in room_to_type.items(): + + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) + + # Just remove rooms if we can't determine their type + if room_type is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue + if ( filters.room_types is not None and room_type not in filters.room_types @@ -1284,13 +1581,24 @@ class SlidingSyncHandler: ): filtered_room_id_set.remove(room_id) - if filters.room_name_like: + if filters.room_name_like is not None: + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. + # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) raise NotImplementedError() - if filters.tags: + if filters.tags is not None: raise NotImplementedError() - if filters.not_tags: + if filters.not_tags is not None: raise NotImplementedError() # Assemble a new sync room map but only with the `filtered_room_id_set` @@ -1371,14 +1679,17 @@ class SlidingSyncHandler: in the room at the time of `to_token`. to_token: The point in the stream to sync up to. """ - room_state_ids: StateMap[str] + state_ids: StateMap[str] # People shouldn't see past their leave/ban event if room_membership_for_user_at_to_token.membership in ( Membership.LEAVE, Membership.BAN, ): - # TODO: `get_state_ids_at(...)` doesn't take into account the "current state" - room_state_ids = await self.storage_controllers.state.get_state_ids_at( + # TODO: `get_state_ids_at(...)` doesn't take into account the "current + # state". Maybe we need to use + # `get_forward_extremities_for_room_at_stream_ordering(...)` to "Fetch the + # current state at the time." + state_ids = await self.storage_controllers.state.get_state_ids_at( room_id, stream_position=to_token.copy_and_replace( StreamKeyType.ROOM, @@ -1397,7 +1708,7 @@ class SlidingSyncHandler: ) # Otherwise, we can get the latest current state in the room else: - room_state_ids = await self.storage_controllers.state.get_current_state_ids( + state_ids = await self.storage_controllers.state.get_current_state_ids( room_id, state_filter, # Partially-stated rooms should have all state events except for @@ -1412,7 +1723,7 @@ class SlidingSyncHandler: ) # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` - return room_state_ids + return state_ids async def get_current_state_at( self, @@ -1432,17 +1743,17 @@ class SlidingSyncHandler: in the room at the time of `to_token`. to_token: The point in the stream to sync up to. 
""" - room_state_ids = await self.get_current_state_ids_at( + state_ids = await self.get_current_state_ids_at( room_id=room_id, room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, state_filter=state_filter, to_token=to_token, ) - event_map = await self.store.get_events(list(room_state_ids.values())) + event_map = await self.store.get_events(list(state_ids.values())) state_map = {} - for key, event_id in room_state_ids.items(): + for key, event_id in state_ids.items(): event = event_map.get(event_id) if event: state_map[key] = event diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 1c94f3ca46..8f90c17060 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -293,7 +293,9 @@ class StatsHandler: "history_visibility" ) elif delta.event_type == EventTypes.RoomEncryption: - room_state["encryption"] = event_content.get("algorithm") + room_state["encryption"] = event_content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) elif delta.event_type == EventTypes.Name: room_state["name"] = event_content.get("name") elif delta.event_type == EventTypes.Topic: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 066f3d08ae..e12ab94576 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -127,6 +127,8 @@ class SQLBaseStore(metaclass=ABCMeta): # Purge other caches based on room state. self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,)) + self._attempt_to_invalidate_cache("get_room_type", (room_id,)) + self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) def _invalidate_state_caches_all(self, room_id: str) -> None: """Invalidates caches that are based on the current state, but does @@ -153,6 +155,8 @@ class SQLBaseStore(metaclass=ABCMeta): "_get_rooms_for_local_user_where_membership_is_inner", None ) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) + self._attempt_to_invalidate_cache("get_room_type", (room_id,)) + self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) def _attempt_to_invalidate_cache( self, cache_name: str, key: Optional[Collection[Any]] diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 26b8e1a172..63624f3e8f 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -268,13 +268,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] if data.type == EventTypes.Member: - self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined] + self._attempt_to_invalidate_cache( + "get_rooms_for_user", (data.state_key,) + ) + elif data.type == EventTypes.RoomEncryption: + self._attempt_to_invalidate_cache( + "get_room_encryption", (data.room_id,) + ) + elif data.type == EventTypes.Create: + self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) elif row.type == EventsStreamAllStateRow.TypeId: assert isinstance(data, EventsStreamAllStateRow) # Similar to the above, but the entire caches are invalidated. This is # unfortunate for the membership caches, but should recover quickly. 
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] - self.get_rooms_for_user.invalidate_all() # type: ignore[attr-defined] + self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) + self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,)) else: raise Exception("Unknown events stream row type %s" % (row.type,)) @@ -345,6 +355,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_forgotten_rooms_for_user", (state_key,) ) + elif etype == EventTypes.Create: + self._attempt_to_invalidate_cache("get_room_type", (room_id,)) + elif etype == EventTypes.RoomEncryption: + self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) if relates_to: self._attempt_to_invalidate_cache( @@ -405,6 +419,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_summary", None) self._attempt_to_invalidate_cache("get_thread_participated", None) self._attempt_to_invalidate_cache("get_threads", (room_id,)) + self._attempt_to_invalidate_cache("get_room_type", (room_id,)) + self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) self._attempt_to_invalidate_cache("_get_state_group_for_event", None) @@ -457,6 +473,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) self._attempt_to_invalidate_cache("get_room_version_id", (room_id,)) + self._attempt_to_invalidate_cache("get_room_type", (room_id,)) + self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) # And delete state caches. diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5188b2f7a4..62bc4600fb 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -30,6 +30,7 @@ from typing import ( Iterable, List, Mapping, + MutableMapping, Optional, Set, Tuple, @@ -72,10 +73,18 @@ logger = logging.getLogger(__name__) _T = TypeVar("_T") - MAX_STATE_DELTA_HOPS = 100 +# Freeze so it's immutable and we can use it as a cache value +@attr.s(slots=True, frozen=True, auto_attribs=True) +class Sentinel: + pass + + +ROOM_UNKNOWN_SENTINEL = Sentinel() + + @attr.s(slots=True, frozen=True, auto_attribs=True) class EventMetadata: """Returned by `get_metadata_for_events`""" @@ -300,51 +309,189 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @cached(max_entries=10000) async def get_room_type(self, room_id: str) -> Optional[str]: - """Get the room type for a given room. The server must be joined to the - given room. - """ - - row = await self.db_pool.simple_select_one( - table="room_stats_state", - keyvalues={"room_id": room_id}, - retcols=("room_type",), - allow_none=True, - desc="get_room_type", - ) - - if row is not None: - return row[0] - - # If we haven't updated `room_stats_state` with the room yet, query the - # create event directly. - create_event = await self.get_create_event_for_room(room_id) - room_type = create_event.content.get(EventContentFields.ROOM_TYPE) - return room_type + raise NotImplementedError() @cachedList(cached_method_name="get_room_type", list_name="room_ids") async def bulk_get_room_type( self, room_ids: Set[str] - ) -> Mapping[str, Optional[str]]: - """Bulk fetch room types for the given rooms, the server must be in all - the rooms given. 
+    ) -> Mapping[str, Union[Optional[str], Sentinel]]:
+        """
+        Bulk fetch room types for the given rooms (via current state).
+
+        Since this function is cached, any missing values would be cached as `None`.
+        In order to distinguish between a room that has `None` as its type and a room
+        that is unknown to the server where we might want to omit the value (which
+        would make it cached as `None`), instead we use the sentinel value
+        `ROOM_UNKNOWN_SENTINEL`.
+
+        Returns:
+            A mapping from room ID to the room's type (`None` is a valid room type).
+            Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`.
         """
-        rows = await self.db_pool.simple_select_many_batch(
-            table="room_stats_state",
-            column="room_id",
-            iterable=room_ids,
-            retcols=("room_id", "room_type"),
-            desc="bulk_get_room_type",
+
+        def txn(
+            txn: LoggingTransaction,
+        ) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "room_id", room_ids
+            )
+
+            # We can't rely on `room_stats_state.room_type` if the server has left the
+            # room because the `room_id` will still be in the table but everything will
+            # be set to `None`, yet `None` is a valid room type value. We join against
+            # the `room_stats_current` table which keeps track of the
+            # `current_state_events` count (and a proxy value `local_users_in_room`
+            # which can be used to assume the server is participating in the room and
+            # has current state) to ensure that the data in `room_stats_state` is
+            # up-to-date with the current state.
+            #
+            # FIXME: Use `room_stats_current.current_state_events` instead of
+            # `room_stats_current.local_users_in_room` once
+            # https://github.com/element-hq/synapse/issues/17457 is fixed.
+            sql = f"""
+                SELECT room_id, room_type
+                FROM room_stats_state
+                INNER JOIN room_stats_current USING (room_id)
+                WHERE
+                    {clause}
+                    AND local_users_in_room > 0
+            """
+
+            txn.execute(sql, args)
+
+            room_id_to_type_map = {}
+            for row in txn:
+                room_id_to_type_map[row[0]] = row[1]
+
+            return room_id_to_type_map
+
+        results = await self.db_pool.runInteraction(
+            "bulk_get_room_type",
+            txn,
         )
 
         # If we haven't updated `room_stats_state` with the room yet, query the
         # create events directly. This should happen only rarely so we don't
         # mind if we do this in a loop.
-        results = dict(rows)
         for room_id in room_ids - results.keys():
-            create_event = await self.get_create_event_for_room(room_id)
-            room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
-            results[room_id] = room_type
+            try:
+                create_event = await self.get_create_event_for_room(room_id)
+                room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
+                results[room_id] = room_type
+            except NotFoundError:
+                # We use the sentinel value to distinguish between `None` which is a
+                # valid room type and a room that is unknown to the server so the value
+                # is just unset.
+                results[room_id] = ROOM_UNKNOWN_SENTINEL
+
+        return results
+
+    @cached(max_entries=10000)
+    async def get_room_encryption(self, room_id: str) -> Optional[str]:
+        raise NotImplementedError()
+
+    @cachedList(cached_method_name="get_room_encryption", list_name="room_ids")
+    async def bulk_get_room_encryption(
+        self, room_ids: Set[str]
+    ) -> Mapping[str, Union[Optional[str], Sentinel]]:
+        """
+        Bulk fetch room encryption for the given rooms (via current state).
+
+        Since this function is cached, any missing values would be cached as `None`.
+        In order to distinguish between an unencrypted room that has `None` encryption
+        and a room that is unknown to the server where we might want to omit the value
+        (which would make it cached as `None`), instead we use the sentinel value
+        `ROOM_UNKNOWN_SENTINEL`.
+
+        Returns:
+            A mapping from room ID to the room's encryption algorithm if the room is
+            encrypted, otherwise `None`. Rooms unknown to this server will return
+            `ROOM_UNKNOWN_SENTINEL`.
+        """
+
+        def txn(
+            txn: LoggingTransaction,
+        ) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "room_id", room_ids
+            )
+
+            # We can't rely on `room_stats_state.encryption` if the server has left the
+            # room because the `room_id` will still be in the table but everything will
+            # be set to `None`, yet `None` is a valid encryption value. We join against
+            # the `room_stats_current` table which keeps track of the
+            # `current_state_events` count (and a proxy value `local_users_in_room`
+            # which can be used to assume the server is participating in the room and
+            # has current state) to ensure that the data in `room_stats_state` is
+            # up-to-date with the current state.
+            #
+            # FIXME: Use `room_stats_current.current_state_events` instead of
+            # `room_stats_current.local_users_in_room` once
+            # https://github.com/element-hq/synapse/issues/17457 is fixed.
+            sql = f"""
+                SELECT room_id, encryption
+                FROM room_stats_state
+                INNER JOIN room_stats_current USING (room_id)
+                WHERE
+                    {clause}
+                    AND local_users_in_room > 0
+            """
+
+            txn.execute(sql, args)
+
+            room_id_to_encryption_map = {}
+            for row in txn:
+                room_id_to_encryption_map[row[0]] = row[1]
+
+            return room_id_to_encryption_map
+
+        results = await self.db_pool.runInteraction(
+            "bulk_get_room_encryption",
+            txn,
+        )
+
+        # If we haven't updated `room_stats_state` with the room yet, query the state
+        # directly. This should happen only rarely so we don't mind if we do this in a
+        # loop.
+        encryption_event_ids: List[str] = []
+        for room_id in room_ids - results.keys():
+            state_map = await self.get_partial_filtered_current_state_ids(
+                room_id,
+                state_filter=StateFilter.from_types(
+                    [
+                        (EventTypes.Create, ""),
+                        (EventTypes.RoomEncryption, ""),
+                    ]
+                ),
+            )
+            # We can use the create event as a canary to tell whether the server has
+            # seen the room before
+            create_event_id = state_map.get((EventTypes.Create, ""))
+            encryption_event_id = state_map.get((EventTypes.RoomEncryption, ""))
+
+            if create_event_id is None:
+                # We use the sentinel value to distinguish between `None` which is a
+                # valid encryption value and a room that is unknown to the server so
+                # the value is just unset.
+                results[room_id] = ROOM_UNKNOWN_SENTINEL
+                continue
+
+            if encryption_event_id is None:
+                results[room_id] = None
+            else:
+                encryption_event_ids.append(encryption_event_id)
+
+        encryption_event_map = await self.get_events(encryption_event_ids)
+
+        for encryption_event_id in encryption_event_ids:
+            encryption_event = encryption_event_map.get(encryption_event_id)
+            # If the current state says there is an encryption event, we should have it
+            # in the database.
+ assert encryption_event is not None + + results[encryption_event.room_id] = encryption_event.content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) return results diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index a7aa9bb8af..96da47f3b9 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -19,7 +19,7 @@ # import logging from copy import deepcopy -from typing import Dict, Optional +from typing import Dict, List, Optional from unittest.mock import patch from parameterized import parameterized @@ -35,7 +35,7 @@ from synapse.api.constants import ( RoomTypes, ) from synapse.api.room_versions import RoomVersions -from synapse.events import make_event_from_dict +from synapse.events import StrippedStateEvent, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import ( RoomSyncConfig, @@ -3093,6 +3093,78 @@ class FilterRoomsTestCase(HomeserverTestCase): return room_id + _remote_invite_count: int = 0 + + def _create_remote_invite_room_for_user( + self, + invitee_user_id: str, + unsigned_invite_room_state: Optional[List[StrippedStateEvent]], + ) -> str: + """ + Create a fake invite for a remote room and persist it. + + We don't have any state for these kind of rooms and can only rely on the + stripped state included in the unsigned portion of the invite event to identify + the room. + + Args: + invitee_user_id: The person being invited + unsigned_invite_room_state: List of stripped state events to assist the + receiver in identifying the room. + + Returns: + The room ID of the remote invite room + """ + invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" + + invite_event_dict = { + "room_id": invite_room_id, + "sender": "@inviter:remote_server", + "state_key": invitee_user_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + } + if unsigned_invite_room_state is not None: + serialized_stripped_state_events = [] + for stripped_event in unsigned_invite_room_state: + serialized_stripped_state_events.append( + { + "type": stripped_event.type, + "state_key": stripped_event.state_key, + "sender": stripped_event.sender, + "content": stripped_event.content, + } + ) + + invite_event_dict["unsigned"] = { + "invite_room_state": serialized_stripped_state_events + } + + invite_event = make_event_from_dict( + invite_event_dict, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + self.store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.get_success(persist_controller.persist_event(invite_event, context)) + + self._remote_invite_count += 1 + + return invite_room_id + def test_filter_dm_rooms(self) -> None: """ Test `filter.is_dm` for DM rooms @@ -3157,7 +3229,7 @@ class FilterRoomsTestCase(HomeserverTestCase): user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - # Create a normal room + # Create an unencrypted room room_id = self.helper.create_room_as(user1_id, tok=user1_tok) # Create an encrypted room @@ -3165,7 +3237,7 @@ class 
FilterRoomsTestCase(HomeserverTestCase): self.helper.send_state( encrypted_room_id, EventTypes.RoomEncryption, - {"algorithm": "m.megolm.v1.aes-sha2"}, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, tok=user1_tok, ) @@ -3206,6 +3278,460 @@ class FilterRoomsTestCase(HomeserverTestCase): self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + def test_filter_encrypted_server_left_room(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` against a room that everyone has left. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + before_rooms_token = self.event_sources.get_current_token() + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Leave the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + # We're using a `from_token` so that the room is considered `newly_left` and + # appears in our list of relevant sync rooms + from_token=before_rooms_token, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + + def test_filter_encrypted_server_left_room2(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` against a room that everyone has + left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + before_rooms_token = self.event_sources.get_current_token() + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Invite user2 + self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + # We're using a `from_token` so that the room is considered `newly_left` and + # appears in our list of relevant sync rooms + from_token=before_rooms_token, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + + def test_filter_encrypted_after_we_left(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` against a room that was encrypted + after we left the room (make sure we don't just use the current state) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + before_rooms_token = self.event_sources.get_current_token() + + # Create an unencrypted room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Leave the room + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a room that will be encrypted + encrypted_after_we_left_room_id = self.helper.create_room_as( + user2_id, tok=user2_tok + ) + # Leave the room + self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + + # Encrypt the room after we've left + self.helper.send_state( + encrypted_after_we_left_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + # We're using a `from_token` so that the room is considered `newly_left` and + # appears 
in our list of relevant sync rooms + from_token=before_rooms_token, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + # Even though we left the room before it was encrypted, we still see it because + # someone else on our server is still participating in the room and we "leak" + # the current state to the left user. But we consider the room encryption status + # to not be a secret given it's often set at the start of the room and it's one + # of the stripped state events that is normally handed out. + self.assertEqual( + truthy_filtered_room_map.keys(), {encrypted_after_we_left_room_id} + ) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + # Even though we left the room before it was encrypted... (see comment above) + self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + + def test_filter_encrypted_with_remote_invite_room_no_stripped_state(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). + self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). 
+ self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + + def test_filter_encrypted_with_remote_invite_encrypted_room(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` filter against a remote invite + encrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should appear here because it is encrypted + # according to the stripped state + self.assertEqual( + truthy_filtered_room_map.keys(), {encrypted_room_id, remote_invite_room_id} + ) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear here because it is encrypted + # according to the stripped state + self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) + + def test_filter_encrypted_with_remote_invite_unencrypted_room(self) -> None: + """ + Test that we can apply a `filter.is_encrypted` filter against a remote invite + unencrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but don't set any room encryption event. 
+ remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + # No room encryption event + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + + # Try with `is_encrypted=True` + truthy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=True, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear here because it is unencrypted + # according to the stripped state + self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) + + # Try with `is_encrypted=False` + falsy_filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters( + is_encrypted=False, + ), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should appear because it is unencrypted according to + # the stripped state + self.assertEqual( + falsy_filtered_room_map.keys(), {room_id, remote_invite_room_id} + ) + def test_filter_invite_rooms(self) -> None: """ Test `filter.is_invite` for rooms that the user has been invited to @@ -3461,47 +3987,159 @@ class FilterRoomsTestCase(HomeserverTestCase): self.assertEqual(filtered_room_map.keys(), {space_room_id}) - def test_filter_room_types_with_invite_remote_room(self) -> None: - """Test that we can apply a room type filter, even if we have an invite - for a remote room. - - This is a regression test. + def test_filter_room_types_server_left_room(self) -> None: + """ + Test that we can apply a `filter.room_types` against a room that everyone has left. """ - user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") - # Create a fake remote invite and persist it. 
- invite_room_id = "!some:room" - invite_event = make_event_from_dict( - { - "room_id": invite_room_id, - "sender": "@user:test.serv", - "state_key": user1_id, - "depth": 1, - "origin_server_ts": 1, - "type": EventTypes.Member, - "content": {"membership": Membership.INVITE}, - "auth_events": [], - "prev_events": [], - }, - room_version=RoomVersions.V10, - ) - invite_event.internal_metadata.outlier = True - invite_event.internal_metadata.out_of_band_membership = True - - self.get_success( - self.store.maybe_store_room_on_outlier_membership( - room_id=invite_room_id, room_version=invite_event.room_version - ) - ) - context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None - self.get_success(persist_controller.persist_event(invite_event, context)) + before_rooms_token = self.event_sources.get_current_token() # Create a normal room (no room type) room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Leave the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + # We're using a `from_token` so that the room is considered `newly_left` and + # appears in our list of relevant sync rooms + from_token=before_rooms_token, + to_token=after_rooms_token, + ) + + # Try finding only normal rooms + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {room_id}) + + # Try finding only spaces + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {space_room_id}) + + def test_filter_room_types_server_left_room2(self) -> None: + """ + Test that we can apply a `filter.room_types` against a room that everyone has left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + before_rooms_token = self.event_sources.get_current_token() + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Invite user2 + self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + # We're using a `from_token` so that the room is considered `newly_left` and + # appears in our list of relevant sync rooms + from_token=before_rooms_token, + to_token=after_rooms_token, + ) + + # Try finding only normal rooms + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {room_id}) + + # Try finding only spaces + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), + after_rooms_token, + ) + ) + + self.assertEqual(filtered_room_map.keys(), {space_room_id}) + + def test_filter_room_types_with_remote_invite_room_no_stripped_state(self) -> None: + """ + Test that we can apply a `filter.room_types` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) after_rooms_token = self.event_sources.get_current_token() @@ -3512,18 +4150,186 @@ class FilterRoomsTestCase(HomeserverTestCase): to_token=after_rooms_token, ) + # Try finding only normal rooms filtered_room_map = self.get_success( self.sliding_sync_handler.filter_rooms( UserID.from_string(user1_id), sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None, RoomTypes.SPACE], - ), + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, ) ) - self.assertEqual(filtered_room_map.keys(), {room_id, invite_room_id}) + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + self.assertEqual(filtered_room_map.keys(), {room_id}) + + # Try finding only spaces + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + self.assertEqual(filtered_room_map.keys(), {space_room_id}) + + def test_filter_room_types_with_remote_invite_space(self) -> None: + """ + Test that we can apply a `filter.room_types` filter against a remote invite + to a space room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` indicating + # that it is a space room + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # Specify that it is a space room + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + + # Try finding only normal rooms + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear here because it is a space room + # according to the stripped state + self.assertEqual(filtered_room_map.keys(), {room_id}) + + # Try finding only spaces + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should appear here because it is a space room + # according to the stripped state + self.assertEqual( + filtered_room_map.keys(), {space_room_id, remote_invite_room_id} + ) + + def test_filter_room_types_with_remote_invite_normal_room(self) -> None: + """ + Test that we can apply a `filter.room_types` filter against a remote invite + to a normal room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but the create event does not specify a room type (normal room) + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # No room type means this is a normal room + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + after_rooms_token = self.event_sources.get_current_token() + + # Get the rooms the user should be syncing with + sync_room_map = self._get_sync_room_ids_for_user( + UserID.from_string(user1_id), + from_token=None, + to_token=after_rooms_token, + ) + + # Try finding only normal rooms + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should appear here because it is a normal room + # according to the stripped state (no room type) + self.assertEqual(filtered_room_map.keys(), {room_id, remote_invite_room_id}) + + # Try finding only spaces + filtered_room_map = self.get_success( + self.sliding_sync_handler.filter_rooms( + UserID.from_string(user1_id), + sync_room_map, + SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), + after_rooms_token, + ) + ) + + # `remote_invite_room_id` should not appear here because it is a normal room + # according to the stripped state (no room type) + self.assertEqual(filtered_room_map.keys(), {space_room_id}) class SortRoomsTestCase(HomeserverTestCase): diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 1184adde70..53f8ae7ece 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -37,6 +37,7 @@ from synapse.api.constants import ( Membership, ReceiptTypes, RelationTypes, + RoomTypes, ) from synapse.api.room_versions import RoomVersions from synapse.events import EventBase @@ -1850,6 +1851,150 @@ class SlidingSyncTestCase(SlidingSyncBase): }, ) + def test_filter_regardless_of_membership_server_left_room(self) -> None: + """ + Test that filters apply to rooms regardless of membership. We're also + compounding the problem by having all of the local users leave the room causing + our server to leave the room. + + We want to make sure that if someone is filtering rooms, and leaves, you still + get that final update down sync that you left. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create an encrypted space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + self.helper.send_state( + space_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + # Make an initial Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + from_token = channel.json_body["pos"] + + # Make sure the response has the lists we requested + self.assertListEqual( + list(channel.json_body["lists"].keys()), + ["all-list", "foo-list"], + channel.json_body["lists"].keys(), + ) + + # Make sure the lists have the correct rooms + self.assertListEqual( + list(channel.json_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(channel.json_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + + # Everyone leaves the encrypted space room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make an incremental Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={from_token}", + { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make sure the lists have the correct rooms even though we `newly_left` + self.assertListEqual( + list(channel.json_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(channel.json_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + def test_sort_list(self) -> None: """ Test that the `lists` are sorted by `stream_ordering` From 1d6186265ad0715fb2e12d8b5b5dbeceedfbb86f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 Jul 2024 11:47:26 -0500 Subject: [PATCH 089/332] Sliding Sync: Fix `limited` response description (make accurate) (#17507) --- changelog.d/17507.misc | 1 + synapse/types/handlers/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17507.misc diff --git a/changelog.d/17507.misc b/changelog.d/17507.misc new 
file mode 100644 index 0000000000..82c4d263be --- /dev/null +++ b/changelog.d/17507.misc @@ -0,0 +1 @@ +Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 2f7e92665c..a6adf6c9ea 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -182,8 +182,8 @@ class SlidingSyncResult: absent on joined/left rooms prev_batch: A token that can be passed as a start parameter to the `/rooms//messages` API to retrieve earlier messages. - limited: True if their are more events than fit between the given position and now. - Sync again to get more. + limited: True if there are more events than `timeline_limit` looking + backwards from the `response.pos` to the `request.pos`. num_live: The number of timeline events which have just occurred and are not historical. The last N events are 'live' and should be treated as such. This is mostly useful to determine whether a given @mention event should make a noise or not. From 39731bb20592cb836f5297d2cf8655814f54e451 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 Jul 2024 12:20:46 -0500 Subject: [PATCH 090/332] Sliding Sync: Split and move tests (#17504) Split and move Sliding Sync tests so we have some more sane test file sizes --- changelog.d/17504.misc | 1 + tests/rest/client/sliding_sync/__init__.py | 13 + .../sliding_sync/test_connection_tracking.py | 453 ++ .../test_extension_account_data.py | 495 ++ .../sliding_sync/test_extension_e2ee.py | 441 ++ .../sliding_sync/test_extension_receipts.py | 679 ++ .../sliding_sync/test_extension_to_device.py | 278 + .../client/sliding_sync/test_extensions.py | 267 + .../sliding_sync/test_room_subscriptions.py | 285 + .../client/sliding_sync/test_rooms_invites.py | 510 ++ .../client/sliding_sync/test_rooms_meta.py | 710 ++ .../sliding_sync/test_rooms_required_state.py | 713 ++ .../sliding_sync/test_rooms_timeline.py | 493 ++ .../client/sliding_sync/test_sliding_sync.py | 974 +++ tests/rest/client/test_sync.py | 5863 +---------------- 15 files changed, 6315 insertions(+), 5860 deletions(-) create mode 100644 changelog.d/17504.misc create mode 100644 tests/rest/client/sliding_sync/__init__.py create mode 100644 tests/rest/client/sliding_sync/test_connection_tracking.py create mode 100644 tests/rest/client/sliding_sync/test_extension_account_data.py create mode 100644 tests/rest/client/sliding_sync/test_extension_e2ee.py create mode 100644 tests/rest/client/sliding_sync/test_extension_receipts.py create mode 100644 tests/rest/client/sliding_sync/test_extension_to_device.py create mode 100644 tests/rest/client/sliding_sync/test_extensions.py create mode 100644 tests/rest/client/sliding_sync/test_room_subscriptions.py create mode 100644 tests/rest/client/sliding_sync/test_rooms_invites.py create mode 100644 tests/rest/client/sliding_sync/test_rooms_meta.py create mode 100644 tests/rest/client/sliding_sync/test_rooms_required_state.py create mode 100644 tests/rest/client/sliding_sync/test_rooms_timeline.py create mode 100644 tests/rest/client/sliding_sync/test_sliding_sync.py diff --git a/changelog.d/17504.misc b/changelog.d/17504.misc new file mode 100644 index 0000000000..4ab892843d --- /dev/null +++ b/changelog.d/17504.misc @@ -0,0 +1 @@ +Split and move Sliding Sync tests so we have some more sane test file sizes. 
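The corrected `limited` description above is easier to see as a minimal sketch (illustrative only: `load_timeline`, the list standing in for the event stream, and the example event IDs are all invented for this note and are not part of the patch):

def load_timeline(events_between_positions: list, timeline_limit: int) -> tuple:
    # `events_between_positions` stands in for the events between `request.pos`
    # and `response.pos`, oldest first; a real server would read these from the
    # event stream rather than from a list.
    limited = len(events_between_positions) > timeline_limit
    # Keep only the newest `timeline_limit` events, counting backwards from
    # `response.pos` (note `[-0:]` would return everything, hence the guard).
    timeline = events_between_positions[-timeline_limit:] if timeline_limit else []
    return timeline, limited

# Seven events lie between the two positions but `timeline_limit` is 5, so the
# two oldest are dropped and the room is reported as `limited`.
timeline, limited = load_timeline(["$e1", "$e2", "$e3", "$e4", "$e5", "$e6", "$e7"], 5)
assert timeline == ["$e3", "$e4", "$e5", "$e6", "$e7"]
assert limited is True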
diff --git a/tests/rest/client/sliding_sync/__init__.py b/tests/rest/client/sliding_sync/__init__.py new file mode 100644 index 0000000000..c4de9d53e2 --- /dev/null +++ b/tests/rest/client/sliding_sync/__init__.py @@ -0,0 +1,13 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py new file mode 100644 index 0000000000..4d8866b30a --- /dev/null +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -0,0 +1,453 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +import logging + +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.types import SlidingSyncStreamToken +from synapse.types.handlers import SlidingSyncConfig +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase + +logger = logging.getLogger(__name__) + + +class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): + """ + Test connection tracking in the Sliding Sync API. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + + def test_rooms_required_state_incremental_sync_LIVE(self) -> None: + """Test that we only get state updates in incremental sync for rooms + we've already seen (LIVE). 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Name, ""], + ], + "timeline_limit": 0, + } + } + } + + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + + # Send a state event + self.helper.send_state( + room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok + ) + + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self.assertNotIn("initial", response_body["rooms"][room_id1]) + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + @parameterized.expand([(False,), (True,)]) + def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None: + """ + Test getting room data where we have previously sent down the room, but + we missed sending down some timeline events previously and so its status + is considered PREVIOUSLY. + + There are two versions of this test, one where there are more messages + than the timeline limit, and one where there isn't. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + timeline_limit = 5 + conn_id = "conn_id" + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": timeline_limit, + } + }, + "conn_id": "conn_id", + } + + # The first room gets sent down the initial sync + response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + # We now send down some events in room1 (depending on the test param). + expected_events = [] # The set of events in the timeline + if limited: + for _ in range(10): + resp = self.helper.send(room_id1, "msg1", tok=user1_tok) + expected_events.append(resp["event_id"]) + else: + resp = self.helper.send(room_id1, "msg1", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # A second messages happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. 
+        response_body, from_token = self.do_sync(
+            sync_body, since=initial_from_token, tok=user1_tok
+        )
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
+        )
+
+        # FIXME: This is a hack to record that the first room wasn't sent down
+        # sync, as we don't implement that currently.
+        sliding_sync_handler = self.hs.get_sliding_sync_handler()
+        requester = self.get_success(
+            self.hs.get_auth().get_user_by_access_token(user1_tok)
+        )
+        sync_config = SlidingSyncConfig(
+            user=requester.user,
+            requester=requester,
+            conn_id=conn_id,
+        )
+
+        parsed_initial_from_token = self.get_success(
+            SlidingSyncStreamToken.from_string(self.store, initial_from_token)
+        )
+        connection_position = self.get_success(
+            sliding_sync_handler.connection_store.record_rooms(
+                sync_config,
+                parsed_initial_from_token,
+                sent_room_ids=[],
+                unsent_room_ids=[room_id1],
+            )
+        )
+
+        # FIXME: Now fix up `from_token` with the new connection position above.
+        parsed_from_token = self.get_success(
+            SlidingSyncStreamToken.from_string(self.store, from_token)
+        )
+        parsed_from_token = SlidingSyncStreamToken(
+            stream_token=parsed_from_token.stream_token,
+            connection_position=connection_position,
+        )
+        from_token = self.get_success(parsed_from_token.to_string(self.store))
+
+        # We now send another event to room1, so we should sync all the missing events.
+        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
+        expected_events.append(resp["event_id"])
+
+        # This sync should contain the messages from room1 not yet sent down.
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+        self.assertNotIn("initial", response_body["rooms"][room_id1])
+
+        self.assertEqual(
+            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
+            expected_events[-timeline_limit:],
+        )
+        self.assertEqual(response_body["rooms"][room_id1]["limited"], limited)
+        self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None)
+
+    def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None:
+        """
+        Test getting room data where we have previously sent down the room, but
+        we missed sending down some state previously and so its status is
+        considered PREVIOUSLY.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        conn_id = "conn_id"
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Name, ""],
+                    ],
+                    "timeline_limit": 0,
+                }
+            },
+            "conn_id": "conn_id",
+        }
+
+        # The first room gets sent down the initial sync
+        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+
+        # We now send down some state in room1
+        resp = self.helper.send_state(
+            room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok
+        )
+        name_change_id = resp["event_id"]
+
+        # A second message happens in the other room, so room1 won't get sent down.
+        self.helper.send(room_id2, "msg", tok=user1_tok)
+
+        # Only the second room gets sent down sync.
+ response_body, from_token = self.do_sync( + sync_body, since=initial_from_token, tok=user1_tok + ) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # FIXME: This is a hack to record that the first room wasn't sent down + # sync, as we don't implement that currently. + sliding_sync_handler = self.hs.get_sliding_sync_handler() + requester = self.get_success( + self.hs.get_auth().get_user_by_access_token(user1_tok) + ) + sync_config = SlidingSyncConfig( + user=requester.user, + requester=requester, + conn_id=conn_id, + ) + + parsed_initial_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, initial_from_token) + ) + connection_position = self.get_success( + sliding_sync_handler.connection_store.record_rooms( + sync_config, + parsed_initial_from_token, + sent_room_ids=[], + unsent_room_ids=[room_id1], + ) + ) + + # FIXME: Now fix up `from_token` with new connect position above. + parsed_from_token = self.get_success( + SlidingSyncStreamToken.from_string(self.store, from_token) + ) + parsed_from_token = SlidingSyncStreamToken( + stream_token=parsed_from_token.stream_token, + connection_position=connection_position, + ) + from_token = self.get_success(parsed_from_token.to_string(self.store)) + + # We now send another event to room1, so we should sync all the missing state. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # This sync should contain the state changes from room1. + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + self.assertNotIn("initial", response_body["rooms"][room_id1]) + + # We should only see the name change. + self.assertEqual( + [ + ev["event_id"] + for ev in response_body["rooms"][room_id1]["required_state"] + ], + [name_change_id], + ) + + def test_rooms_required_state_incremental_sync_NEVER(self) -> None: + """ + Test getting `required_state` where we have NEVER sent down the room before + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Name, ""], + ], + "timeline_limit": 1, + } + }, + } + + # A message happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # We now send another event to room1, so we should send down the full + # room. + self.helper.send(room_id1, "msg2", tok=user1_tok) + + # This sync should contain the messages from room1 not yet sent down. 
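+        # (Room1 has NEVER been sent down this connection before, so it should
+        # come down as a fresh room: `initial: True` with its full
+        # `required_state`.)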
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + + def test_rooms_timeline_incremental_sync_NEVER(self) -> None: + """ + Test getting timeline room data where we have NEVER sent down the room + before + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 5, + } + }, + } + + expected_events = [] + for _ in range(4): + resp = self.helper.send(room_id1, "msg", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # A message happens in the other room, so room1 won't get sent down. + self.helper.send(room_id2, "msg", tok=user1_tok) + + # Only the second room gets sent down sync. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id2}, response_body["rooms"] + ) + + # We now send another event to room1 so it comes down sync + resp = self.helper.send(room_id1, "msg2", tok=user1_tok) + expected_events.append(resp["event_id"]) + + # This sync should contain the messages from room1 not yet sent down. + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertCountEqual( + response_body["rooms"].keys(), {room_id1}, response_body["rooms"] + ) + + self.assertEqual( + [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]], + expected_events, + ) + self.assertEqual(response_body["rooms"][room_id1]["limited"], True) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py new file mode 100644 index 0000000000..3482a5f887 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_extension_account_data.py @@ -0,0 +1,495 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import AccountDataTypes
+from synapse.rest.client import login, room, sendtodevice, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
+    """Tests for the account_data sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        sendtodevice.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.account_data_handler = hs.get_account_data_handler()
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the account_data extension works during an initial
+        sync, even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the account_data extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            {
+                global_event["type"]
+                for global_event in response_body["extensions"]["account_data"].get(
+                    "global"
+                )
+            },
+            # Even though we don't have any global account data set, Synapse saves some
+            # default push rules for us.
+            {AccountDataTypes.PUSH_RULES},
+            exact=True,
+        )
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the account_data extension works during an incremental
+        sync, even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # There have been no account data changes since the `from_token`, so we
+        # shouldn't see any account data here.
+        self.assertIncludes(
+            {
+                global_event["type"]
+                for global_event in response_body["extensions"]["account_data"].get(
+                    "global"
+                )
+            },
+            set(),
+            exact=True,
+        )
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_global_account_data_initial_sync(self) -> None:
+        """
+        On initial sync, we should return all global account data.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Update the global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.foobarbaz", + content={"foo": "bar"}, + ) + ) + + # Make an initial Sliding Sync request with the account_data extension enabled + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # It should show us all of the global account data + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_global_account_data_incremental_sync(self) -> None: + """ + On incremental sync, we should only account data that has changed since the + `from_token`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Add some global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.foobarbaz", + content={"foo": "bar"}, + ) + ) + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some other global account data + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id=user1_id, + account_data_type="org.matrix.doodardaz", + content={"doo": "dar"}, + ) + ) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIncludes( + { + global_event["type"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + }, + # We should only see the new global account data that happened after the `from_token` + {"org.matrix.doodardaz"}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_room_account_data_initial_sync(self) -> None: + """ + On initial sync, we return all account data for a given room but only for + rooms that we request and are being returned in the Sliding Sync response. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Make an initial Sliding Sync request with the account_data extension enabled + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + self.assertIncludes( + { + event["type"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + }, + {"org.matrix.roorarraz"}, + exact=True, + ) + + def test_room_account_data_incremental_sync(self) -> None: + """ + On incremental sync, we return all account data for a given room but only for + rooms that we request and are being returned in the Sliding Sync response. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # We should only see the new room account data that happened after the `from_token` + self.assertIncludes( + { + event["type"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + }, + {"org.matrix.roorarraz2"}, + exact=True, + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the account_data extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the global account data to trigger new results + self.get_success( + self.account_data_handler.add_account_data_for_user( + user1_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the global account data update + self.assertIncludes( + { + global_event["type"] + for global_event in channel.json_body["extensions"]["account_data"].get( + "global" + ) + }, + {"org.matrix.foobarbaz"}, + exact=True, + ) + self.assertIncludes( + channel.json_body["extensions"]["account_data"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the account_data extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "account_data": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events( + user1_id, + # We choose `StreamKeyType.PRESENCE` because we're testing for account data + # and don't want to contaminate the account data results using + # `StreamKeyType.ACCOUNT_DATA`. + wake_stream_key=StreamKeyType.PRESENCE, + ) + # Block for a little bit more to ensure we don't see any new results. 
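+        # (If the extension's default payload were wrongly counted as new data,
+        # the request would already have returned here instead of continuing to
+        # block.)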
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self.assertIsNotNone(
+            channel.json_body["extensions"]["account_data"].get("global")
+        )
+        self.assertIsNotNone(
+            channel.json_body["extensions"]["account_data"].get("rooms")
+        )
diff --git a/tests/rest/client/sliding_sync/test_extension_e2ee.py b/tests/rest/client/sliding_sync/test_extension_e2ee.py
new file mode 100644
index 0000000000..320f8c788f
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_e2ee.py
@@ -0,0 +1,441 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.rest.client import devices, login, room, sync
+from synapse.server import HomeServer
+from synapse.types import JsonDict, StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
+    """Tests for the e2ee sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        devices.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.e2e_keys_handler = hs.get_e2e_keys_handler()
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an initial sync, even
+        if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the e2ee extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Device list updates are only present for incremental syncs
+        self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists"))
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # This is always present because of
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an incremental sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token =
self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the e2ee extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Device list shows up for incremental syncs + self.assertEqual( + response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), + [], + ) + self.assertEqual( + response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + # Both of these should be present even when empty + self.assertEqual( + response_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the device lists to trigger new results + # Have user3 update their device list + device_update_channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual( + device_update_channel.code, 200, device_update_channel.json_body + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the device list update + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [user3_id], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the E2EE extension doesn't trigger a false-positive for new data (see + `device_one_time_keys_count.signed_curve25519`). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device lists are present for incremental syncs but empty because no device changes + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + # Both of these should be present even when empty + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. 
+ # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_device_lists(self) -> None: + """ + Test that device list updates are included in the response + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + self.helper.join(room_id, user4_id, tok=user4_tok) + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Have user3 update their device list + channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # User4 leaves the room + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # Make an incremental Sliding Sync request with the e2ee extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Device list updates show up + self.assertEqual( + response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), + [user3_id], + ) + self.assertEqual( + response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [user4_id], + ) + + def test_device_one_time_keys_count(self) -> None: + """ + Test that `device_one_time_keys_count` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # Upload one time keys for the user/device + keys: JsonDict = { + "alg1:k1": "key1", + "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, + "alg2:k3": {"key": "key3"}, + } + upload_keys_response = self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, test_device_id, {"one_time_keys": keys} + ) + ) + self.assertDictEqual( + upload_keys_response, + { + "one_time_key_counts": { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. 
+ # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + } + }, + ) + + # Make a Sliding Sync request with the e2ee extension enabled + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Check for those one time key counts + self.assertEqual( + response_body["extensions"]["e2ee"].get("device_one_time_keys_count"), + { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + }, + ) + + def test_device_unused_fallback_key_types(self) -> None: + """ + Test that `device_unused_fallback_key_types` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # We shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(res, []) + + # Upload a fallback key for the user/device + self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, + test_device_id, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, + ) + ) + # We should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(fallback_res, ["alg1"], fallback_res) + + # Make a Sliding Sync request with the e2ee extension enabled + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Check for the unused fallback key types + self.assertListEqual( + response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"), + ["alg1"], + ) diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py new file mode 100644 index 0000000000..65fbac260e --- /dev/null +++ b/tests/rest/client/sliding_sync/test_extension_receipts.py @@ -0,0 +1,679 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EduTypes, ReceiptTypes
+from synapse.rest.client import login, receipts, room, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
+    """Tests for the receipts sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the receipts extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an incremental sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_receipts_initial_sync_with_timeline(self) -> None:
+        """
+        On initial sync, we only return receipts for events in a given room's timeline.
+
+        We also make sure that we only return receipts for rooms that we request and are
+        already being returned in the Sliding Sync response.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + # Create a room + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + room1_event_response1 = self.helper.send( + room_id1, body="new event1", tok=user2_tok + ) + room1_event_response2 = self.helper.send( + room_id1, body="new event2", tok=user2_tok + ) + # User1 reads the last event + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 reads the last event + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User3 reads the first event + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User4 privately reads the last event (make sure this doesn't leak to the other users) + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}", + {}, + access_token=user4_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create another room + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + self.helper.join(room_id2, user3_id, tok=user3_tok) + self.helper.join(room_id2, user4_id, tok=user4_tok) + room2_event_response1 = self.helper.send( + room_id2, body="new event2", tok=user2_tok + ) + # User1 reads the last event + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 reads the last event + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User4 privately reads the last event (make sure this doesn't leak to the other users) + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}", + {}, + access_token=user4_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make an initial Sliding Sync request with the receipts extension enabled + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + # On initial sync, we only have receipts for events in the timeline + "timeline_limit": 1, + } + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Only the latest event in the room is in the timelie 
+        # `timeline_limit` is 1
+        self.assertIncludes(
+            {
+                event["event_id"]
+                for event in response_body["rooms"][room_id1].get("timeline", [])
+            },
+            {room1_event_response2["event_id"]},
+            exact=True,
+            message=str(response_body["rooms"][room_id1]),
+        )
+
+        # Even though we requested room2, we only expect room1 to show up because that's
+        # the only room in the Sliding Sync response (room2 is not one of our room
+        # subscriptions or in a sliding window list).
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We can see user1 and user2 read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response2["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id, user2_id},
+            exact=True,
+        )
+        # User1 did not have a private read receipt and we shouldn't leak others'
+        # private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response2["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+
+        # We shouldn't see receipts for event1 since it wasn't in the timeline and
+        # this is an initial sync
+        self.assertIsNone(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get(
+                room1_event_response1["event_id"]
+            )
+        )
+
+    def test_receipts_incremental_sync(self) -> None:
+        """
+        On incremental sync, we return all receipts in the token range for a given room
+        but only for rooms that we request and are being returned in the Sliding Sync
+        response.
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + # Create room1 + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + room1_event_response1 = self.helper.send( + room_id1, body="new event2", tok=user2_tok + ) + # User2 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room2 + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + room2_event_response1 = self.helper.send( + room_id2, body="new event2", tok=user2_tok + ) + # User1 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room3 + room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id3, user1_id, tok=user1_tok) + self.helper.join(room_id3, user3_id, tok=user3_tok) + room3_event_response1 = self.helper.send( + room_id3, body="new event", tok=user2_tok + ) + + # Create room4 + room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id4, user1_id, tok=user1_tok) + self.helper.join(room_id4, user3_id, tok=user3_tok) + event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok) + # User1 reads the last event (before the `from_token`) + channel = self.make_request( + "POST", + f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + room_id3: { + "required_state": [], + "timeline_limit": 0, + }, + room_id4: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id1, room_id2, room_id3, room_id4], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Add some more read receipts after the `from_token` + # + # User1 reads room1 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User1 privately reads room2 + channel = self.make_request( + "POST", + f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User3 reads room3 + channel = self.make_request( + "POST", + f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}", + {}, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # No activity for room4 after the `from_token` + + # Make an incremental Sliding Sync request with the receipts 
extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Even though we requested room2, we only expect rooms to show up if they are
+        # already in the Sliding Sync response. room4 doesn't show up because there is
+        # no activity after the `from_token`.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1, room_id3},
+            exact=True,
+        )
+
+        # Check room1:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user1 has read something in room1 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id1, response_body["rooms"])
+
+        # Check room3:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user3 has read something in room3 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user3_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id3, response_body["rooms"])
+
+    def test_receipts_incremental_sync_all_live_receipts(self) -> None:
+        """
+        On incremental sync, we return all receipts in the token range for a given room
+        even if they are not in the timeline.
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create room1 + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + # The timeline will only include event2 + "timeline_limit": 1, + }, + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id1], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + room1_event_response1 = self.helper.send( + room_id1, body="new event1", tok=user2_tok + ) + room1_event_response2 = self.helper.send( + room_id1, body="new event2", tok=user2_tok + ) + + # User1 reads event1 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", + {}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 reads event2 + channel = self.make_request( + "POST", + f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make an incremental Sliding Sync request with the receipts extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should see room1 because it has receipts in the token range + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # Sanity check that it's the correct ephemeral event type + self.assertEqual( + response_body["extensions"]["receipts"]["rooms"][room_id1]["type"], + EduTypes.RECEIPT, + ) + # We should see all receipts in the token range regardless of whether the events + # are in the timeline + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response1["event_id"] + ][ReceiptTypes.READ].keys(), + {user1_id}, + exact=True, + ) + self.assertIncludes( + response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ + room1_event_response2["event_id"] + ][ReceiptTypes.READ].keys(), + {user2_id}, + exact=True, + ) + # Only the latest event in the timeline because the `timeline_limit` is 1 + self.assertIncludes( + { + event["event_id"] + for event in response_body["rooms"][room_id1].get("timeline", []) + }, + {room1_event_response2["event_id"]}, + exact=True, + message=str(response_body["rooms"][room_id1]), + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + event_response = self.helper.send(room_id, body="new event", tok=user2_tok) + + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + "receipts": { + "enabled": True, + "rooms": [room_id], + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the receipts extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the receipts to trigger new results + receipt_channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}", + {}, + access_token=user2_tok, + ) + self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the new receipt + self.assertIncludes( + channel.json_body.get("extensions", {}) + .get("receipts", {}) + .get("rooms", {}) + .keys(), + {room_id}, + exact=True, + message=str(channel.json_body), + ) + self.assertIncludes( + channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][ + event_response["event_id"] + ][ReceiptTypes.READ].keys(), + {user2_id}, + exact=True, + ) + # User1 did not send a private read receipt in this room and we shouldn't leak + # others' private read receipts + self.assertIncludes( + channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][ + event_response["event_id"] + ] + .get(ReceiptTypes.READ_PRIVATE, {}) + .keys(), + set(), + exact=True, + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the receipts extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "receipts": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) + # Block for a little bit more to ensure we don't see any new results. 
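+        # Here the notifier is woken with `StreamKeyType.ACCOUNT_DATA`, a stream
+        # unrelated to receipts: the wake-up forces `SlidingSyncResult.__bool__`
+        # to be evaluated, and the sync must still time out because the receipts
+        # extension's empty default payload (roughly
+        # `{"extensions": {"receipts": {"rooms": {}}}}`) is not new data.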
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertIncludes( + channel.json_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py new file mode 100644 index 0000000000..f8500812ea --- /dev/null +++ b/tests/rest/client/sliding_sync/test_extension_to_device.py @@ -0,0 +1,278 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +import logging +from typing import List + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.rest.client import login, sendtodevice, sync +from synapse.server import HomeServer +from synapse.types import JsonDict, StreamKeyType +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase +from tests.server import TimedOutException + +logger = logging.getLogger(__name__) + + +class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): + """Tests for the to-device sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + sendtodevice.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def _assert_to_device_response( + self, response_body: JsonDict, expected_messages: List[JsonDict] + ) -> str: + """Assert the sliding sync response was successful and has the expected + to-device messages. + + Returns the next_batch token from the to-device section. 
+ """ + extensions = response_body["extensions"] + to_device = extensions["to_device"] + self.assertIsInstance(to_device["next_batch"], str) + self.assertEqual(to_device["events"], expected_messages) + + return to_device["next_batch"] + + def test_no_data(self) -> None: + """Test that enabling to-device extension works, even if there is + no-data + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # We expect no to-device messages + self._assert_to_device_response(response_body, []) + + def test_data_initial_sync(self) -> None: + """Test that we get to-device messages when we don't specify a since + token""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + # Send the to-device message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self._assert_to_device_response( + response_body, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_data_incremental_sync(self) -> None: + """Test that we get to-device messages over incremental syncs""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + sync_body: JsonDict = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # No to-device messages yet. + next_batch = self._assert_to_device_response(response_body, []) + + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + next_batch = self._assert_to_device_response( + response_body, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + # The next sliding sync request should not include the to-device + # message. + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + "since": next_batch, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self._assert_to_device_response(response_body, []) + + # An initial sliding sync request should not include the to-device + # message, as it should have been deleted + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self._assert_to_device_response(response_body, []) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the to-device messages to trigger new results + test_msg = {"foo": "bar"} + send_to_device_channel = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual( + send_to_device_channel.code, 200, send_to_device_channel.result + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response( + channel.json_body, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the To-Device extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events( + user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA + ) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response(channel.json_body, []) diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py new file mode 100644 index 0000000000..e42904b69b --- /dev/null +++ b/tests/rest/client/sliding_sync/test_extensions.py @@ -0,0 +1,267 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. 
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import ReceiptTypes
+from synapse.rest.client import login, receipts, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncExtensionsTestCase(SlidingSyncBase):
+    """
+    Test general extensions behavior in the Sliding Sync API. Each extension has its
+    own suite of tests in its own file as well.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.account_data_handler = hs.get_account_data_handler()
+
+    # Any extensions that use `lists`/`rooms` should be tested here
+    @parameterized.expand([("account_data",), ("receipts",)])
+    def test_extensions_lists_rooms_relevant_rooms(self, extension_name: str) -> None:
+        """
+        With various extensions, test out requesting different variations of
+        `lists`/`rooms`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create some rooms
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_to_human_name_map = {
+            room_id1: "room1",
+            room_id2: "room2",
+            room_id3: "room3",
+            room_id4: "room4",
+            room_id5: "room5",
+        }
+
+        for room_id in room_id_to_human_name_map.keys():
+            if extension_name == "account_data":
+                # Add some account data to each room
+                self.get_success(
+                    self.account_data_handler.add_account_data_to_room(
+                        user_id=user1_id,
+                        room_id=room_id,
+                        account_data_type="org.matrix.roorarraz",
+                        content={"roo": "rar"},
+                    )
+                )
+            elif extension_name == "receipts":
+                event_response = self.helper.send(
+                    room_id, body="new event", tok=user1_tok
+                )
+                # Read last event
+                channel = self.make_request(
+                    "POST",
+                    f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
+                    {},
+                    access_token=user1_tok,
+                )
+                self.assertEqual(channel.code, 200, channel.json_body)
+            else:
+                raise AssertionError(f"Unknown extension name: {extension_name}")
+
+        main_sync_body = {
+            "lists": {
+                # We expect this list range to include room5 and room4
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                # We expect this list range to include room5, room4, room3
+                "bar-list": {
+                    "ranges": [[0, 2]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            },
+        }
+
+        # Mix lists and rooms
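+        # The scoping rule exercised below: an extension only returns data for a
+        # room when the room is both named by its `lists`/`rooms` filters *and*
+        # present in the main response (via a list window or a room
+        # subscription). Unknown list names and room IDs appear to be silently
+        # ignored rather than rejected.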
+ sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": ["foo-list", "non-existent-list"], + "rooms": [room_id1, room_id2, "!non-existent-room"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Requested via `rooms` and a room subscription exists + # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions) + # room3: ❌ Not requested + # room4: ✅ Shows up because requested via `lists` and list exists in the response + # room5: ✅ Shows up because requested via `lists` and list exists in the response + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room1", "room4", "room5"}, + exact=True, + ) + + # Try wildcards (this is the default) + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + # "lists": ["*"], + # "rooms": ["*"], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room1", "room3", "room4", "room5"}, + exact=True, + ) + + # Empty list will return nothing + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": [], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ❌ Not requested + # room4: ❌ Not requested + # room5: ❌ Not requested + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + set(), + exact=True, + ) + + # Try wildcard and none + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": ["*"], + "rooms": [], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ✅ Shows up because of default `lists` wildcard and is in a list + # room4: ✅ Shows up because of default `lists` wildcard and is in a list + # room5: ✅ Shows up because of default `lists` wildcard and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room3", "room4", "room5"}, + exact=True, + ) + + # Try requesting a room that is only in a list + sync_body = { + **main_sync_body, + "extensions": { + extension_name: { + "enabled": True, + "lists": [], + "rooms": [room_id5], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # room1: ❌ Not requested + # room2: ❌ Not requested + # room3: ❌ Not requested + # room4: ❌ Not requested + # room5: ✅ Requested via `rooms` and is in a list + self.assertIncludes( + { + room_id_to_human_name_map[room_id] + for room_id in response_body["extensions"][extension_name] + .get("rooms") + .keys() + }, + {"room5"}, + exact=True, + ) diff --git 
a/tests/rest/client/sliding_sync/test_room_subscriptions.py b/tests/rest/client/sliding_sync/test_room_subscriptions.py new file mode 100644 index 0000000000..cc17b0b354 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_room_subscriptions.py @@ -0,0 +1,285 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +import logging +from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes, HistoryVisibility +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase + +logger = logging.getLogger(__name__) + + +class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): + """ + Test `room_subscriptions` in the Sliding Sync API. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + + def test_room_subscriptions_with_join_membership(self) -> None: + """ + Test `room_subscriptions` with a joined room should give us timeline and current + state events. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request with just the room subscription + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We should see some state + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + # We should see some events + self.assertEqual( + [ + event["event_id"] + for event in response_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + ], + response_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + response_body["rooms"][room_id1]["num_live"], + 0, + response_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + response_body["rooms"][room_id1]["limited"], + True, + response_body["rooms"][room_id1], + ) + + def test_room_subscriptions_with_leave_membership(self) -> None: + """ + Test `room_subscriptions` with a leave room should give us timeline and state + events up to the leave event. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Send some events after user1 leaves + self.helper.send(room_id1, "activity after leave", tok=user2_tok) + # Update state after user1 leaves + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "qux"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with just the room subscription + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 2, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # We should see the state at the time of the leave + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[("org.matrix.foo_state", "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + # We should see some before we left (nothing after) + self.assertEqual( + [ + event["event_id"] + for event in response_body["rooms"][room_id1]["timeline"] + ], + [ + join_response["event_id"], + leave_response["event_id"], + ], + response_body["rooms"][room_id1]["timeline"], + ) + # No "live" events in an initial sync (no `from_token` to define the "live" + # range) + self.assertEqual( + response_body["rooms"][room_id1]["num_live"], + 0, + response_body["rooms"][room_id1], + ) + # There are more events to paginate to + self.assertEqual( + response_body["rooms"][room_id1]["limited"], + True, + response_body["rooms"][room_id1], + ) + + def test_room_subscriptions_no_leak_private_room(self) -> None: + """ + Test `room_subscriptions` with a private room we have never been in should not + leak any data to the user. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False) + + # We should not be able to join the private room + self.helper.join( + room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN + ) + + # Make the Sliding Sync request with just the room subscription + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # We should not see the room at all (we're not in it) + self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) + + def test_room_subscriptions_world_readable(self) -> None: + """ + Test `room_subscriptions` with a room that has `world_readable` history visibility + + FIXME: We should be able to see the room timeline and state + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room with `world_readable` history visibility + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "preset": "public_chat", + "initial_state": [ + { + "content": { + "history_visibility": HistoryVisibility.WORLD_READABLE + }, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + # Ensure we're testing with a room with `world_readable` history visibility + # which means events are visible to anyone even without membership. + history_visibility_response = self.helper.get_state( + room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok + ) + self.assertEqual( + history_visibility_response.get("history_visibility"), + HistoryVisibility.WORLD_READABLE, + ) + + # Note: We never join the room + + # Make the Sliding Sync request with just the room subscription + sync_body = { + "room_subscriptions": { + room_id1: { + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # FIXME: In the future, we should be able to see the room because it's + # `world_readable` but currently we don't support this. + self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) diff --git a/tests/rest/client/sliding_sync/test_rooms_invites.py b/tests/rest/client/sliding_sync/test_rooms_invites.py new file mode 100644 index 0000000000..f08ffaf674 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_rooms_invites.py @@ -0,0 +1,510 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes, HistoryVisibility
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.types import UserID
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase):
+    """
+    Test to make sure the `rooms` response looks good for invites in the Sliding Sync API.
+
+    Invites behave very differently from other rooms because we don't include the
+    `timeline` (`num_live`, `limited`, `prev_batch`) or `required_state` in favor of
+    some stripped state under the `invite_state` key.
+
+    Knocks probably have the same behavior but the spec doesn't mention knocks yet.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+    def test_rooms_invite_shared_history_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility, which
+        # means history isn't visible until you actually join the room.
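+        # `shared` is the default `m.room.history_visibility` for newly created
+        # rooms, so no extra setup should be needed here; the assertion below just
+        # guards against that default changing out from under the test.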
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.SHARED,
+ )
+
+ self.helper.send(room_id1, "activity before1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("timeline"),
+ response_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("num_live"),
+ response_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("limited"),
+ response_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("prev_batch"),
+ response_body["rooms"][room_id1],
+ )
+ # `required_state` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("required_state"),
+ response_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ response_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ response_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_invite_shared_history_incremental_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ incremental sync.
+
+ This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+ but we also shouldn't see any timeline events because the history visibility is
+ `shared` and we haven't joined the room yet.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user1 = UserID.from_string(user1_id)
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user2 = UserID.from_string(user2_id)
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Ensure we're testing with a room with `shared` history visibility, which means
+ # history is not visible until you actually join the room.
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.SHARED,
+ )
+
+ self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 3,
+ }
+ }
+ }
+ _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after token6", tok=user2_tok)
+
+ # Make the Sliding Sync request
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("timeline"),
+ response_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("num_live"),
+ response_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("limited"),
+ response_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("prev_batch"),
+ response_body["rooms"][room_id1],
+ )
+ # `required_state` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("required_state"),
+ response_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ response_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ response_body["rooms"][room_id1]["invite_state"],
+ )
+
+ def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
+ """
+ Test that `rooms` we are invited to have some stripped `invite_state` during an
+ initial sync.
+
+ This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+ but depending on the semantics we decide, we could potentially see some
+ historical events before/after the `from_token` because the history is
+ `world_readable`. Same situation for events after the `from_token` if the
+ history visibility was set to `invited`.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user1 = UserID.from_string(user1_id) + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user2 = UserID.from_string(user2_id) + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "preset": "public_chat", + "initial_state": [ + { + "content": { + "history_visibility": HistoryVisibility.WORLD_READABLE + }, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + # Ensure we're testing with a room with `world_readable` history visibility + # which means events are visible to anyone even without membership. + history_visibility_response = self.helper.get_state( + room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok + ) + self.assertEqual( + history_visibility_response.get("history_visibility"), + HistoryVisibility.WORLD_READABLE, + ) + + self.helper.send(room_id1, "activity before1", tok=user2_tok) + self.helper.send(room_id1, "activity before2", tok=user2_tok) + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + self.helper.send(room_id1, "activity after3", tok=user2_tok) + self.helper.send(room_id1, "activity after4", tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # Large enough to see the latest events and before the invite + "timeline_limit": 4, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # `timeline` is omitted for `invite` rooms with `stripped_state` + self.assertIsNone( + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], + ) + # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) + self.assertIsNone( + response_body["rooms"][room_id1].get("num_live"), + response_body["rooms"][room_id1], + ) + # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) + self.assertIsNone( + response_body["rooms"][room_id1].get("limited"), + response_body["rooms"][room_id1], + ) + # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) + self.assertIsNone( + response_body["rooms"][room_id1].get("prev_batch"), + response_body["rooms"][room_id1], + ) + # `required_state` is omitted for `invite` rooms with `stripped_state` + self.assertIsNone( + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], + ) + # We should have some `stripped_state` so the potential joiner can identify the + # room (we don't care about the order). 
+ self.assertCountEqual( + response_body["rooms"][room_id1]["invite_state"], + [ + { + "content": {"creator": user2_id, "room_version": "10"}, + "sender": user2_id, + "state_key": "", + "type": "m.room.create", + }, + { + "content": {"join_rule": "public"}, + "sender": user2_id, + "state_key": "", + "type": "m.room.join_rules", + }, + { + "content": {"displayname": user2.localpart, "membership": "join"}, + "sender": user2_id, + "state_key": user2_id, + "type": "m.room.member", + }, + { + "content": {"displayname": user1.localpart, "membership": "invite"}, + "sender": user2_id, + "state_key": user1_id, + "type": "m.room.member", + }, + ], + response_body["rooms"][room_id1]["invite_state"], + ) + + def test_rooms_invite_world_readable_history_incremental_sync(self) -> None: + """ + Test that `rooms` we are invited to have some stripped `invite_state` during an + incremental sync. + + This is an `invite` room so we should only have `stripped_state` (no `timeline`) + but depending on the semantics we decide, we could potentially see some + historical events before/after the `from_token` because the history is + `world_readable`. Same situation for events after the `from_token` if the + history visibility was set to `invited`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user1 = UserID.from_string(user1_id) + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user2 = UserID.from_string(user2_id) + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "preset": "public_chat", + "initial_state": [ + { + "content": { + "history_visibility": HistoryVisibility.WORLD_READABLE + }, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + # Ensure we're testing with a room with `world_readable` history visibility + # which means events are visible to anyone even without membership. 
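+ # (The `initial_state` entry above is roughly equivalent to sending the
+ # following after room creation; a sketch for illustration only, since the
+ # test deliberately sets the visibility at creation time instead:
+ #
+ #     self.helper.send_state(
+ #         room_id1,
+ #         EventTypes.RoomHistoryVisibility,
+ #         {"history_visibility": HistoryVisibility.WORLD_READABLE},
+ #         tok=user2_tok,
+ #     )
+ # )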
+ history_visibility_response = self.helper.get_state(
+ room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+ )
+ self.assertEqual(
+ history_visibility_response.get("history_visibility"),
+ HistoryVisibility.WORLD_READABLE,
+ )
+
+ self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+ self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+ self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ # Large enough to see the latest events and before the invite
+ "timeline_limit": 4,
+ }
+ }
+ }
+ _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+ self.helper.send(room_id1, "activity after token6", tok=user2_tok)
+
+ # Make the incremental Sliding Sync request
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+ # `timeline` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("timeline"),
+ response_body["rooms"][room_id1],
+ )
+ # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("num_live"),
+ response_body["rooms"][room_id1],
+ )
+ # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("limited"),
+ response_body["rooms"][room_id1],
+ )
+ # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("prev_batch"),
+ response_body["rooms"][room_id1],
+ )
+ # `required_state` is omitted for `invite` rooms with `stripped_state`
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("required_state"),
+ response_body["rooms"][room_id1],
+ )
+ # We should have some `stripped_state` so the potential joiner can identify the
+ # room (we don't care about the order).
+ self.assertCountEqual(
+ response_body["rooms"][room_id1]["invite_state"],
+ [
+ {
+ "content": {"creator": user2_id, "room_version": "10"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.create",
+ },
+ {
+ "content": {"join_rule": "public"},
+ "sender": user2_id,
+ "state_key": "",
+ "type": "m.room.join_rules",
+ },
+ {
+ "content": {"displayname": user2.localpart, "membership": "join"},
+ "sender": user2_id,
+ "state_key": user2_id,
+ "type": "m.room.member",
+ },
+ {
+ "content": {"displayname": user1.localpart, "membership": "invite"},
+ "sender": user2_id,
+ "state_key": user1_id,
+ "type": "m.room.member",
+ },
+ ],
+ response_body["rooms"][room_id1]["invite_state"],
+ )
diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
new file mode 100644
index 0000000000..04f11c0524
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -0,0 +1,710 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.room_versions import RoomVersions
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.test_utils.event_injection import create_event
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
+ """
+ Test rooms meta info like name, avatar, joined_count, invited_count, is_dm,
+ bump_stamp in the Sliding Sync API.
+ """
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ sync.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.storage_controllers = hs.get_storage_controllers()
+
+ def test_rooms_meta_when_joined(self) -> None:
+ """
+ Test that the `rooms` `name` and `avatar` are included in the response and
+ reflect the current state of the room when the user is joined to the room.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "name": "my super room",
+ },
+ )
+ # Set the room avatar URL
+ self.helper.send_state(
+ room_id1,
+ EventTypes.RoomAvatar,
+ {"url": "mxc://DUMMY_MEDIA_ID"},
+ tok=user2_tok,
+ )
+
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Make the Sliding Sync request
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 0,
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ # Reflect the current state of the room
+ self.assertEqual(
+ response_body["rooms"][room_id1]["name"],
+ "my super room",
+ response_body["rooms"][room_id1],
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["avatar"],
+ "mxc://DUMMY_MEDIA_ID",
+ response_body["rooms"][room_id1],
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["joined_count"],
+ 2,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["invited_count"],
+ 0,
+ )
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("is_dm"),
+ )
+
+ def test_rooms_meta_when_invited(self) -> None:
+ """
+ Test that the `rooms` `name` and `avatar` are included in the response and
+ reflect the current state of the room when the user is invited to the room.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # User1 is invited to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # This should still reflect the current state of the room even when the user is + # invited. + self.assertEqual( + response_body["rooms"][room_id1]["name"], + "my super duper room", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["avatar"], + "mxc://UPDATED_DUMMY_MEDIA_ID", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["joined_count"], + 1, + ) + self.assertEqual( + response_body["rooms"][room_id1]["invited_count"], + 1, + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + + def test_rooms_meta_when_banned(self) -> None: + """ + Test that the `rooms` `name` and `avatar` reflect the state of the room when the + user was banned (do not leak current state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Update the room name after user1 has left + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL after user1 has left + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Reflect the state of the room at the time of leaving + self.assertEqual( + response_body["rooms"][room_id1]["name"], + "my super room", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["joined_count"], + # FIXME: The actual number should be "1" (user2) but we currently don't + # support this for rooms where the user has left/been banned. + 0, + ) + self.assertEqual( + response_body["rooms"][room_id1]["invited_count"], + 0, + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + + def test_rooms_meta_heroes(self) -> None: + """ + Test that the `rooms` `heroes` are included in the response when the room + doesn't have a room name set. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + self.helper.join(room_id1, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + + room_id2 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id2, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Room1 has a name so we shouldn't see any `heroes` which the client would use + # the calculate the room name themselves. 
+ self.assertEqual(
+ response_body["rooms"][room_id1]["name"],
+ "my super room",
+ response_body["rooms"][room_id1],
+ )
+ self.assertIsNone(response_body["rooms"][room_id1].get("heroes"))
+ self.assertEqual(
+ response_body["rooms"][room_id1]["joined_count"],
+ 2,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["invited_count"],
+ 1,
+ )
+
+ # Room2 doesn't have a name so we should see `heroes` populated
+ self.assertIsNone(response_body["rooms"][room_id2].get("name"))
+ self.assertCountEqual(
+ [
+ hero["user_id"]
+ for hero in response_body["rooms"][room_id2].get("heroes", [])
+ ],
+ # Heroes shouldn't include the user themselves (we shouldn't see user1)
+ [user2_id, user3_id],
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id2]["joined_count"],
+ 2,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id2]["invited_count"],
+ 1,
+ )
+
+ # We didn't request any state so we shouldn't see any `required_state`
+ self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+ self.assertIsNone(response_body["rooms"][room_id2].get("required_state"))
+
+ def test_rooms_meta_heroes_max(self) -> None:
+ """
+ Test that the `rooms` `heroes` only includes the first 5 users (not including
+ yourself).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user3_id = self.register_user("user3", "pass")
+ user3_tok = self.login(user3_id, "pass")
+ user4_id = self.register_user("user4", "pass")
+ user4_tok = self.login(user4_id, "pass")
+ user5_id = self.register_user("user5", "pass")
+ user5_tok = self.login(user5_id, "pass")
+ user6_id = self.register_user("user6", "pass")
+ user6_tok = self.login(user6_id, "pass")
+ user7_id = self.register_user("user7", "pass")
+ user7_tok = self.login(user7_id, "pass")
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ # No room name set so that `heroes` is populated
+ #
+ # "name": "my super room",
+ },
+ )
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user3_id, tok=user3_tok)
+ self.helper.join(room_id1, user4_id, tok=user4_tok)
+ self.helper.join(room_id1, user5_id, tok=user5_tok)
+ self.helper.join(room_id1, user6_id, tok=user6_tok)
+ self.helper.join(room_id1, user7_id, tok=user7_tok)
+
+ # Make the Sliding Sync request
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 0,
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ # The room doesn't have a name so we should see `heroes` populated
+ self.assertIsNone(response_body["rooms"][room_id1].get("name"))
+ self.assertCountEqual(
+ [
+ hero["user_id"]
+ for hero in response_body["rooms"][room_id1].get("heroes", [])
+ ],
+ # Heroes should be the first 5 users in the room (excluding the user
+ # themselves, we shouldn't see `user1`)
+ [user2_id, user3_id, user4_id, user5_id, user6_id],
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["joined_count"],
+ 7,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["invited_count"],
+ 0,
+ )
+
+ # We didn't request any state so we shouldn't see any `required_state`
+ self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+ def test_rooms_meta_heroes_when_banned(self) -> None:
+ """
+ Test that the `rooms` `heroes` are included in the response when the room
+ doesn't have a room name set but doesn't leak information past their ban.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user3_id = self.register_user("user3", "pass")
+ _user3_tok = self.login(user3_id, "pass")
+ user4_id = self.register_user("user4", "pass")
+ user4_tok = self.login(user4_id, "pass")
+ user5_id = self.register_user("user5", "pass")
+ _user5_tok = self.login(user5_id, "pass")
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ # No room name set so that `heroes` is populated
+ #
+ # "name": "my super room",
+ },
+ )
+ # User1 joins the room
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ # User3 is invited
+ self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
+
+ # User1 is banned from the room
+ self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ # User4 joins the room after user1 is banned
+ self.helper.join(room_id1, user4_id, tok=user4_tok)
+ # User5 is invited after user1 is banned
+ self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)
+
+ # Make the Sliding Sync request
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [],
+ "timeline_limit": 0,
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ # The room doesn't have a name so we should see `heroes` populated
+ self.assertIsNone(response_body["rooms"][room_id1].get("name"))
+ self.assertCountEqual(
+ [
+ hero["user_id"]
+ for hero in response_body["rooms"][room_id1].get("heroes", [])
+ ],
+ # Heroes shouldn't include the user themselves (we shouldn't see user1). We
+ # also shouldn't see user4 since they joined after user1 was banned.
+ #
+ # FIXME: The actual result should be `[user2_id, user3_id]` but we currently
+ # don't support this for rooms where the user has left/been banned.
+ [],
+ )
+
+ self.assertEqual(
+ response_body["rooms"][room_id1]["joined_count"],
+ # FIXME: The actual number should be "1" (user2) but we currently don't
+ # support this for rooms where the user has left/been banned.
+ 0,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id1]["invited_count"],
+ # We shouldn't see user5 since they were invited after user1 was banned.
+ #
+ # FIXME: The actual number should be "1" (user3) but we currently don't
+ # support this for rooms where the user has left/been banned.
+ 0,
+ )
+
+ def test_rooms_bump_stamp(self) -> None:
+ """
+ Test that `bump_stamp` is present and pointing to relevant events.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + event_response1 = message_response = self.helper.send( + room_id1, "message in room1", tok=user1_tok + ) + event_pos1 = self.get_success( + self.store.get_position_for_event(event_response1["event_id"]) + ) + room_id2 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok) + event_pos2 = self.get_success( + self.store.get_position_for_event(send_response2["event_id"]) + ) + + # Send a reaction in room1 but it shouldn't affect the `bump_stamp` + # because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES` + self.helper.send_event( + room_id1, + type=EventTypes.Reaction, + content={ + "m.relates_to": { + "event_id": message_response["event_id"], + "key": "👍", + "rel_type": "m.annotation", + } + }, + tok=user1_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 100, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the foo-list we requested + self.assertListEqual( + list(response_body["lists"].keys()), + ["foo-list"], + response_body["lists"].keys(), + ) + + # Make sure the list includes the rooms in the right order + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 1], + # room1 sorts before room2 because it has the latest event (the + # reaction) + "room_ids": [room_id1, room_id2], + } + ], + response_body["lists"]["foo-list"], + ) + + # The `bump_stamp` for room1 should point at the latest message (not the + # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) + self.assertEqual( + response_body["rooms"][room_id1]["bump_stamp"], + event_pos1.stream, + response_body["rooms"][room_id1], + ) + + # The `bump_stamp` for room2 should point at the latest message + self.assertEqual( + response_body["rooms"][room_id2]["bump_stamp"], + event_pos2.stream, + response_body["rooms"][room_id2], + ) + + def test_rooms_bump_stamp_backfill(self) -> None: + """ + Test that `bump_stamp` ignores backfilled events, i.e. events with a + negative stream ordering. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote room + creator = "@user:other" + room_id = "!foo:other" + shared_kwargs = { + "room_id": room_id, + "room_version": "10", + } + + create_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[], + type=EventTypes.Create, + state_key="", + sender=creator, + **shared_kwargs, + ) + ) + creator_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[create_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Member, + state_key=creator, + content={"membership": Membership.JOIN}, + sender=creator, + **shared_kwargs, + ) + ) + # We add a message event as a valid "bump type" + msg_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[creator_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=creator, + **shared_kwargs, + ) + ) + invite_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[msg_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.INVITE}, + sender=creator, + **shared_kwargs, + ) + ) + + remote_events_and_contexts = [ + create_tuple, + creator_tuple, + msg_tuple, + invite_tuple, + ] + + # Ensure the local HS knows the room version + self.get_success( + self.store.store_room(room_id, creator, False, RoomVersions.V10) + ) + + # Persist these events as backfilled events. + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + + for event, context in remote_events_and_contexts: + self.get_success(persistence.persist_event(event, context, backfilled=True)) + + # Now we join the local user to the room + join_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[invite_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.JOIN}, + sender=user1_id, + **shared_kwargs, + ) + ) + self.get_success(persistence.persist_event(*join_tuple)) + + # Doing an SS request should return a positive `bump_stamp`, even though + # the only event that matches the bump types has as negative stream + # ordering. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py new file mode 100644 index 0000000000..03e36914ae --- /dev/null +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -0,0 +1,713 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+# +import logging + +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes, Membership +from synapse.handlers.sliding_sync import StateValues +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase +from tests.test_utils.event_injection import mark_event_as_partial_state + +logger = logging.getLogger(__name__) + + +class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): + """ + Test `rooms.required_state` in the Sliding Sync API. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + + def test_rooms_no_required_state(self) -> None: + """ + Empty `rooms.required_state` should not return any state events in the room + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + # Empty `required_state` + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # No `required_state` in response + self.assertIsNone( + response_body["rooms"][room_id1].get("required_state"), + response_body["rooms"][room_id1], + ) + + def test_rooms_required_state_initial_sync(self) -> None: + """ + Test `rooms.required_state` returns requested state events in the room during an + initial sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_incremental_sync(self) -> None: + """ + Test `rooms.required_state` returns requested state events in the room during an + incremental sync. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Make the incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We only return updates but only if we've sent the room down the + # connection before. + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_incremental_sync_restart(self) -> None: + """ + Test `rooms.required_state` returns requested state events in the room during an + incremental sync, after a restart (and so the in memory caches are reset). + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Reset the in-memory cache + self.hs.get_sliding_sync_handler().connection_store._connections.clear() + + # Make the Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # If the cache has been cleared then we do expect the state to come down + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_wildcard(self) -> None: + """ + Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="namespaced", + body={"foo": "bar"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with wildcards for the `event_type` and `state_key` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [StateValues.WILDCARD, StateValues.WILDCARD], + ], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + # We should see all the state events in the room + state_map.values(), + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_wildcard_event_type(self) -> None: + """ + Test `rooms.required_state` returns relevant state events when using wildcard in + the event_type `["*", "foobarbaz"]`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key=user2_id, + body={"foo": "bar"}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with wildcards for the `event_type` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [StateValues.WILDCARD, user2_id], + ], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We expect at-least any state event with the `user2_id` as the `state_key` + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user2_id)], + state_map[("org.matrix.foo_state", user2_id)], + }, + # Ideally, this would be exact but we're currently returning all state + # events when the `event_type` is a wildcard. + exact=False, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_wildcard_state_key(self) -> None: + """ + Test `rooms.required_state` returns relevant state events when using wildcard in + the state_key `["foobarbaz","*"]`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request with wildcards for the `state_key` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Member, StateValues.WILDCARD], + ], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user1_id)], + state_map[(EventTypes.Member, user2_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_lazy_loading_room_members(self) -> None: + """ + Test `rooms.required_state` returns people relevant to the timeline when + lazy-loading room members, `["m.room.member","$LAZY"]`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user3_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request with lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user3 sent events in the 3 events we see in the `timeline` + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.Member, user2_id)], + state_map[(EventTypes.Member, user3_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_me(self) -> None: + """ + Test `rooms.required_state` correctly handles $ME. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + + # Also send normal state events with state keys of the users, first + # change the power levels to allow this. 
+ self.helper.send_state(
+ room_id1,
+ event_type=EventTypes.PowerLevels,
+ body={"users": {user1_id: 50, user2_id: 100}},
+ tok=user2_tok,
+ )
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.foo",
+ state_key=user1_id,
+ body={},
+ tok=user1_tok,
+ )
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.foo",
+ state_key=user2_id,
+ body={},
+ tok=user2_tok,
+ )
+
+ # Make the Sliding Sync request with a request for '$ME'.
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Create, ""],
+ [EventTypes.Member, StateValues.ME],
+ ["org.matrix.foo", StateValues.ME],
+ ],
+ "timeline_limit": 3,
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ # `$ME` should resolve to the requester (user1), so we should see their
+ # membership event and their `org.matrix.foo` state event.
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Create, "")],
+ state_map[(EventTypes.Member, user1_id)],
+ state_map[("org.matrix.foo", user1_id)],
+ },
+ exact=True,
+ )
+ self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+ @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
+ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
+ """
+ Test `rooms.required_state` should not return state past a leave/ban event.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user3_id = self.register_user("user3", "pass")
+ user3_tok = self.login(user3_id, "pass")
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Create, ""],
+ [EventTypes.Member, "*"],
+ ["org.matrix.foo_state", ""],
+ ],
+ "timeline_limit": 3,
+ }
+ }
+ }
+ _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.join(room_id1, user3_id, tok=user3_tok)
+
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.foo_state",
+ state_key="",
+ body={"foo": "bar"},
+ tok=user2_tok,
+ )
+
+ if stop_membership == Membership.LEAVE:
+ # User 1 leaves
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ elif stop_membership == Membership.BAN:
+ # User 1 is banned
+ self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ # Change the state after user 1 leaves
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.foo_state",
+ state_key="",
+ body={"foo": "qux"},
+ tok=user2_tok,
+ )
+ self.helper.leave(room_id1, user3_id, tok=user3_tok)
+
+ # Make an incremental Sliding Sync request
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+ # We should only see the state as it was at the time of the leave/ban; the
+ # later `org.matrix.foo_state` change and user3's leave are not included.
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Create, "")],
+ state_map[(EventTypes.Member, user1_id)],
+ state_map[(EventTypes.Member, user2_id)],
+ state_map[(EventTypes.Member, user3_id)],
+ state_map[("org.matrix.foo_state", "")],
+ },
+ exact=True,
+ )
+ self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+ def test_rooms_required_state_combine_superset(self) -> None:
+ """
+ Test `rooms.required_state` is combined across lists and room subscriptions.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.foo_state",
+ state_key="",
+ body={"foo": "bar"},
+ tok=user2_tok,
+ )
+ self.helper.send_state(
+ room_id1,
+ event_type="org.matrix.bar_state",
+ state_key="",
+ body={"bar": "qux"},
+ tok=user2_tok,
+ )
+
+ # Make the Sliding Sync request with overlapping lists and a room subscription
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Create, ""],
+ [EventTypes.Member, user1_id],
+ ],
+ "timeline_limit": 0,
+ },
+ "bar-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.WILDCARD],
+ ["org.matrix.foo_state", ""],
+ ],
+ "timeline_limit": 0,
+ },
+ },
+ "room_subscriptions": {
+ room_id1: {
+ "required_state": [["org.matrix.bar_state", ""]],
+ "timeline_limit": 0,
+ }
+ },
+ }
+ response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Create, "")],
+ state_map[(EventTypes.Member, user1_id)],
+ state_map[(EventTypes.Member, user2_id)],
+ state_map[("org.matrix.foo_state", "")],
+ state_map[("org.matrix.bar_state", "")],
+ },
+ exact=True,
+ )
+ self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+ def test_rooms_required_state_partial_state(self) -> None:
+ """
+ Test that partially-stated rooms are excluded unless `rooms.required_state` is
+ lazy-loading room members.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) + join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) + + # Mark room2 as partial state + self.get_success( + mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2) + ) + + # Make the Sliding Sync request (NOT lazy-loading room members) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure the list includes room1 but room2 is excluded because it's still + # partially-stated + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 1], + "room_ids": [room_id1], + } + ], + response_body["lists"]["foo-list"], + ) + + # Make the Sliding Sync request (with lazy-loading room members) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + # Lazy-load room members + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # The list should include both rooms now because we're lazy-loading room members + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 1], + "room_ids": [room_id2, room_id1], + } + ], + response_body["lists"]["foo-list"], + ) diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py new file mode 100644 index 0000000000..84a1e0d223 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -0,0 +1,493 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +import logging + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.types import StreamToken +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase + +logger = logging.getLogger(__name__) + + +class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): + """ + Test `rooms.timeline` in the Sliding Sync API. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + + def test_rooms_limited_initial_sync(self) -> None: + """ + Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit` + on initial sync. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.send(room_id1, "activity1", tok=user2_tok) + self.helper.send(room_id1, "activity2", tok=user2_tok) + event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok) + event_pos3 = self.get_success( + self.store.get_position_for_event(event_response3["event_id"]) + ) + event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok) + event_pos4 = self.get_success( + self.store.get_position_for_event(event_response4["event_id"]) + ) + event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok) + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room) + self.assertEqual( + response_body["rooms"][room_id1]["limited"], + True, + response_body["rooms"][room_id1], + ) + # Check to make sure the latest events are returned + self.assertEqual( + [ + event["event_id"] + for event in response_body["rooms"][room_id1]["timeline"] + ], + [ + event_response4["event_id"], + event_response5["event_id"], + user1_join_response["event_id"], + ], + response_body["rooms"][room_id1]["timeline"], + ) + + # Check to make sure the `prev_batch` points at the right place + prev_batch_token = self.get_success( + StreamToken.from_string( + self.store, response_body["rooms"][room_id1]["prev_batch"] + ) + ) + prev_batch_room_stream_token_serialized = self.get_success( + prev_batch_token.room_key.to_string(self.store) + ) + # If we use the `prev_batch` token to look backwards, we should see `event3` + # next so make sure the token encompasses it + self.assertEqual( + event_pos3.persisted_after(prev_batch_token.room_key), + False, + f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}", + ) + # If we use the `prev_batch` token to look backwards, we shouldn't see `event4` + # anymore since it was just returned in this response. + self.assertEqual( + event_pos4.persisted_after(prev_batch_token.room_key), + True, + f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}", + ) + + # With no `from_token` (initial sync), it's all historical since there is no + # "live" range + self.assertEqual( + response_body["rooms"][room_id1]["num_live"], + 0, + response_body["rooms"][room_id1], + ) + + def test_rooms_not_limited_initial_sync(self) -> None: + """ + Test that we mark `rooms` as `limited=False` when there are no more events to + paginate to. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.send(room_id1, "activity1", tok=user2_tok) + self.helper.send(room_id1, "activity2", tok=user2_tok) + self.helper.send(room_id1, "activity3", tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + timeline_limit = 100 + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": timeline_limit, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # The timeline should be `limited=False` because we have all of the events (no + # more to paginate to) + self.assertEqual( + response_body["rooms"][room_id1]["limited"], + False, + response_body["rooms"][room_id1], + ) + expected_number_of_events = 9 + # We're just looking to make sure we got all of the events before hitting the `timeline_limit` + self.assertEqual( + len(response_body["rooms"][room_id1]["timeline"]), + expected_number_of_events, + response_body["rooms"][room_id1]["timeline"], + ) + self.assertLessEqual(expected_number_of_events, timeline_limit) + + # With no `from_token` (initial sync), it's all historical since there is no + # "live" token range. + self.assertEqual( + response_body["rooms"][room_id1]["num_live"], + 0, + response_body["rooms"][room_id1], + ) + + def test_rooms_incremental_sync(self) -> None: + """ + Test `rooms` data during an incremental sync after an initial sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok) + + # Make an initial Sliding Sync request to grab a token. This is also a sanity + # check that we can go from initial to incremental sync. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send some events but don't send enough to saturate the `timeline_limit`. + # We want to later test that we only get the new events since the `next_pos` + event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok) + event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) + + # Make an incremental Sliding Sync request (what we're trying to test) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We only expect to see the new events since the last sync which isn't enough to + # fill up the `timeline_limit`. + self.assertEqual( + response_body["rooms"][room_id1]["limited"], + False, + f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} ' + + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. 
'
+            + str(response_body["rooms"][room_id1]),
+        )
+        # Check to make sure the latest events are returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response2["event_id"],
+                event_response3["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+
+        # All events are "live"
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            2,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_newly_joined_incremental_sync(self) -> None:
+        """
+        Test that when we make an incremental sync with a `newly_joined` room, we are
+        able to see some historical events before the `from_token`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before token1", tok=user2_tok)
+        event_response2 = self.helper.send(
+            room_id1, "activity before token2", tok=user2_tok
+        )
+
+        # The `timeline_limit` is set to 4 so we can at least see one historical event
+        # before the `from_token`. We should see historical events because this is a
+        # `newly_joined` room.
+        timeline_limit = 4
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": timeline_limit,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Join the room after the `from_token` which will make us consider this room as
+        # `newly_joined`.
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send some events but don't send enough to saturate the `timeline_limit`.
+        # We want to later test that we only get the new events since the `next_pos`
+        event_response3 = self.helper.send(
+            room_id1, "activity after token3", tok=user2_tok
+        )
+        event_response4 = self.helper.send(
+            room_id1, "activity after token4", tok=user2_tok
+        )
+
+        # Make an incremental Sliding Sync request (what we're trying to test)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should see the new events and the rest should be filled with historical
+        # events which will make us `limited=True` since there are more to paginate to.
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            f"Our `timeline_limit` was {timeline_limit} "
+            + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+            + str(response_body["rooms"][room_id1]),
+        )
+        # Check to make sure that the "live" and historical events are returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response2["event_id"],
+                user1_join_response["event_id"],
+                event_response3["event_id"],
+                event_response4["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+
+        # Only events after the `from_token` are "live" (join, event3, event4)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            3,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are banned from in an initial sync only allow us to see
+        timeline events up to the ban event.
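+
+        A rough sketch of the visibility rule being asserted (labels invented;
+        this is an assumption for illustration, not the server's actual
+        history-visibility code): the banned user's timeline is clipped at
+        their own ban event::
+
+            room_events = ["after3", "after4", "ban(user1)", "after5", "after6"]
+            ban_index = room_events.index("ban(user1)")
+            visible_to_banned_user = room_events[: ban_index + 1]
+            assert visible_to_banned_user == ["after3", "after4", "ban(user1)"]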
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should see events before the ban but not after
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+        # No "live" events in an initial sync (no `from_token` to define the "live"
+        # range)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+        # There are more events to paginate to
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync1(self) -> None:
+        """
+        Test that `rooms` we are banned from during the next incremental sync only
+        allow us to see timeline events up to the ban event.
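+
+        One way to picture `num_live` here, assuming it counts timeline events
+        persisted after the `from_token` (stream positions are hypothetical)::
+
+            from_token_pos = 10
+            event_positions = [11, 12, 13]  # after3, after4 and the ban event
+            num_live = sum(1 for pos in event_positions if pos > from_token_pos)
+            assert num_live == 3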
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        # The ban is within the token range (between the `from_token` and the sliding
+        # sync request)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should see events before the ban but not after
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+        # All live events in the incremental sync
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            3,
+            response_body["rooms"][room_id1],
+        )
+        # There aren't any more events to paginate to in this range
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            False,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync2(self) -> None:
+        """
+        Test that `rooms` we are banned from before the incremental sync don't return
+        any events in the timeline.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after2", tok=user2_tok)
+        # The ban is before we get our `from_token`
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Nothing to see for this banned user in the room in the token range
+        self.assertIsNone(response_body["rooms"].get(room_id1))
diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
new file mode 100644
index 0000000000..cb7638c5ba
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
@@ -0,0 +1,974 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +import logging +from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple + +from typing_extensions import assert_never + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import ( + AccountDataTypes, + EventContentFields, + EventTypes, + RoomTypes, +) +from synapse.events import EventBase +from synapse.rest.client import devices, login, receipts, room, sync +from synapse.server import HomeServer +from synapse.types import ( + JsonDict, + RoomStreamToken, + SlidingSyncStreamToken, + StreamKeyType, + StreamToken, +) +from synapse.util import Clock +from synapse.util.stringutils import random_string + +from tests import unittest +from tests.server import TimedOutException + +logger = logging.getLogger(__name__) + + +class SlidingSyncBase(unittest.HomeserverTestCase): + """Base class for sliding sync test cases""" + + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def do_sync( + self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + ) -> Tuple[JsonDict, str]: + """Do a sliding sync request with given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + + sync_path = self.sync_endpoint + if since: + sync_path += f"?pos={since}" + + channel = self.make_request( + method="POST", + path=sync_path, + content=sync_body, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + return channel.json_body, channel.json_body["pos"] + + def _assertRequiredStateIncludes( + self, + actual_required_state: Any, + expected_state_events: Iterable[EventBase], + exact: bool = False, + ) -> None: + """ + Wrapper around `assertIncludes` to give slightly better looking diff error + messages that include some context "$event_id (type, state_key)". + + Args: + actual_required_state: The "required_state" of a room from a Sliding Sync + request response. + expected_state_events: The expected state events to be included in the + `actual_required_state`. + exact: Whether the actual state should be exactly equal to the expected + state (no extras). 
+        """
+
+        assert isinstance(actual_required_state, list)
+        for event in actual_required_state:
+            assert isinstance(event, dict)
+
+        self.assertIncludes(
+            {
+                f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
+                for event in actual_required_state
+            },
+            {
+                f'{event.event_id} ("{event.type}", "{event.state_key}")'
+                for event in expected_state_events
+            },
+            exact=exact,
+            # Message to help understand the diff in context
+            message=str(actual_required_state),
+        )
+
+    def _bump_notifier_wait_for_events(
+        self,
+        user_id: str,
+        wake_stream_key: Literal[
+            StreamKeyType.ACCOUNT_DATA,
+            StreamKeyType.PRESENCE,
+        ],
+    ) -> None:
+        """
+        Wake up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
+        Sync results.
+
+        Args:
+            user_id: The user ID to wake up the notifier for
+            wake_stream_key: The stream key to wake up. This will create an actual new
+                entity in that stream so it's best to choose one that won't affect the
+                Sliding Sync results you're testing for. In other words, if you're
+                testing account data, choose `StreamKeyType.PRESENCE` instead. We support
+                two possible stream keys because you're probably testing one or the other
+                so one is always a "safe" option.
+        """
+        # We're expecting some new activity from this point onwards
+        from_token = self.hs.get_event_sources().get_current_token()
+
+        triggered_notifier_wait_for_events = False
+
+        async def _on_new_activity(
+            before_token: StreamToken, after_token: StreamToken
+        ) -> bool:
+            nonlocal triggered_notifier_wait_for_events
+            triggered_notifier_wait_for_events = True
+            return True
+
+        notifier = self.hs.get_notifier()
+
+        # Listen for some new activity for the user. We're just trying to confirm that
+        # our bump below actually does what we think it does (triggers new activity for
+        # the user).
+        result_awaitable = notifier.wait_for_events(
+            user_id,
+            1000,
+            _on_new_activity,
+            from_token=from_token,
+        )
+
+        # Update the account data or presence so that `notifier.wait_for_events(...)`
+        # wakes up. We chose these two options because they're least likely to show up
+        # in the Sliding Sync response so it won't affect whether we have results.
+        if wake_stream_key == StreamKeyType.ACCOUNT_DATA:
+            self.get_success(
+                self.hs.get_account_data_handler().add_account_data_for_user(
+                    user_id,
+                    "org.matrix.foobarbaz",
+                    {"foo": "bar"},
+                )
+            )
+        elif wake_stream_key == StreamKeyType.PRESENCE:
+            sending_user_id = self.register_user(
+                "user_bump_notifier_wait_for_events_" + random_string(10), "pass"
+            )
+            sending_user_tok = self.login(sending_user_id, "pass")
+            test_msg = {"foo": "bar"}
+            chan = self.make_request(
+                "PUT",
+                "/_matrix/client/r0/sendToDevice/m.test/1234",
+                content={"messages": {user_id: {"d1": test_msg}}},
+                access_token=sending_user_tok,
+            )
+            self.assertEqual(chan.code, 200, chan.result)
+        else:
+            assert_never(wake_stream_key)
+
+        # Wait for our notifier result
+        self.get_success(result_awaitable)
+
+        if not triggered_notifier_wait_for_events:
+            raise AssertionError(
+                "Expected `notifier.wait_for_events(...)` to be triggered"
+            )
+
+
+class SlidingSyncTestCase(SlidingSyncBase):
+    """
+    Tests regarding MSC3575 Sliding Sync `/sync` endpoint.
+
+    Please put tests in more specific test files if applicable. This test class is meant
+    for generic behavior of the endpoint.
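+
+    As a hedged sketch, the request/response shape driven through `do_sync` in
+    these tests looks roughly like this (values illustrative, not captured from
+    a real run)::
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 9]],    # inclusive window into the room list
+                    "required_state": [],  # no state events requested
+                    "timeline_limit": 1,   # at most one timeline event per room
+                }
+            }
+        }
+        response_body, pos = self.do_sync(sync_body, tok=user1_tok)
+        # response_body["lists"]["foo-list"]["ops"] then looks like:
+        #     [{"op": "SYNC", "range": [0, 9], "room_ids": [...]}]
+        # and `pos` is fed back as `?pos=` on the next incremental request.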
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        devices.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
+        self.account_data_handler = hs.get_account_data_handler()
+
+    def _add_new_dm_to_global_account_data(
+        self, source_user_id: str, target_user_id: str, target_room_id: str
+    ) -> None:
+        """
+        Helper to handle inserting a new DM for the source user into global account data
+        (handles all of the list merging).
+
+        Args:
+            source_user_id: The user ID of the DM mapping we're going to update
+            target_user_id: User ID of the person the DM is with
+            target_room_id: Room ID of the DM
+        """
+
+        # Get the current DM map
+        existing_dm_map = self.get_success(
+            self.store.get_global_account_data_by_type_for_user(
+                source_user_id, AccountDataTypes.DIRECT
+            )
+        )
+        # Scrutinize the account data since it has no concrete type. We're just copying
+        # everything into a known type. It should be a mapping from user ID to a list of
+        # room IDs. Ignore anything else.
+        new_dm_map: Dict[str, List[str]] = {}
+        if isinstance(existing_dm_map, dict):
+            for user_id, room_ids in existing_dm_map.items():
+                if isinstance(user_id, str) and isinstance(room_ids, list):
+                    for room_id in room_ids:
+                        if isinstance(room_id, str):
+                            new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
+                                room_id
+                            ]
+
+        # Add the new DM to the map
+        new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
+            target_room_id
+        ]
+        # Save the DM map to global account data
+        self.get_success(
+            self.store.add_account_data_for_user(
+                source_user_id,
+                AccountDataTypes.DIRECT,
+                new_dm_map,
+            )
+        )
+
+    def _create_dm_room(
+        self,
+        inviter_user_id: str,
+        inviter_tok: str,
+        invitee_user_id: str,
+        invitee_tok: str,
+        should_join_room: bool = True,
+    ) -> str:
+        """
+        Helper to create a DM room as the "inviter" and invite the "invitee" user to the
+        room. The "invitee" user will also join the room. The `m.direct` account data
+        will be set for both users.
+        """
+
+        # Create a room and send an invite to the other user
+        room_id = self.helper.create_room_as(
+            inviter_user_id,
+            is_public=False,
+            tok=inviter_tok,
+        )
+        self.helper.invite(
+            room_id,
+            src=inviter_user_id,
+            targ=invitee_user_id,
+            tok=inviter_tok,
+            extra_data={"is_direct": True},
+        )
+        if should_join_room:
+            # Person that was invited joins the room
+            self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
+
+        # Mimic the client setting the room as a direct message in the global account
+        # data for both users.
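+        # Rough shape of the `m.direct` content being maintained, with invented
+        # IDs for illustration: a map from the *other* user's ID to the list of
+        # shared DM room IDs, e.g.
+        #     {"@user2:test": ["!dm1:test"]}                before a second DM
+        #     {"@user2:test": ["!dm1:test", "!dm2:test"]}   after appending it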
+ self._add_new_dm_to_global_account_data( + invitee_user_id, inviter_user_id, room_id + ) + self._add_new_dm_to_global_account_data( + inviter_user_id, invitee_user_id, room_id + ) + + return room_id + + def test_sync_list(self) -> None: + """ + Test that room IDs show up in the Sliding Sync `lists` + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the foo-list we requested + self.assertListEqual( + list(response_body["lists"].keys()), + ["foo-list"], + response_body["lists"].keys(), + ) + + # Make sure the list includes the room we are joined to + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [room_id], + } + ], + response_body["lists"]["foo-list"], + ) + + def test_wait_for_sync_token(self) -> None: + """ + Test that worker will wait until it catches up to the given token + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a future token that will cause us to wait. Since we never send a new + # event to reach that future stream_ordering, the worker will wait until the + # full timeout. + stream_id_gen = self.store.get_events_stream_id_generator() + stream_id = self.get_success(stream_id_gen.get_next().__aenter__()) + current_token = self.event_sources.get_current_token() + future_position_token = current_token.copy_and_replace( + StreamKeyType.ROOM, + RoomStreamToken(stream=stream_id), + ) + + future_position_token_serialized = self.get_success( + SlidingSyncStreamToken(future_position_token, 0).to_string(self.store) + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + } + } + } + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={future_position_token_serialized}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)` + # timeout + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=9900) + channel.await_result(timeout_ms=200) + self.assertEqual(channel.code, 200, channel.json_body) + + # We expect the next `pos` in the result to be the same as what we requested + # with because we weren't able to find anything new yet. + self.assertEqual(channel.json_body["pos"], future_position_token_serialized) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the room with new events to trigger new results
+        event_response1 = self.helper.send(
+            room_id, "new activity in room", tok=user1_tok
+        )
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Check to make sure the new event is returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id]["timeline"]
+            ],
+            [
+                event_response1["event_id"],
+            ],
+            channel.json_body["rooms"][room_id]["timeline"],
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # There should be no room sent down.
+        self.assertFalse(channel.json_body["rooms"])
+
+    def test_filter_list(self) -> None:
+        """
+        Test that filters apply to `lists`
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a DM room
+        joined_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=True,
+        )
+        invited_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=False,
+        )
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Create a room that user1 is invited to
+        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                # Absence of filters does not imply "False" values
+                "all": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {},
+                },
+                # Test single truthy filter
+                "dms": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": True},
+                },
+                # Test single falsy filter
+                "non-dms": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": False},
+                },
+                # Test how multiple filters should stack (AND'd together)
+                "room-invites": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": False, "is_invite": True},
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the lists we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["all", "dms", "non-dms", "room-invites"],
+            response_body["lists"].keys(),
+        )
+
+        # Make sure the lists have the correct rooms
+        self.assertListEqual(
+            list(response_body["lists"]["all"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [
+                        invite_room_id,
+                        room_id,
+                        invited_dm_room_id,
+                        joined_dm_room_id,
+                    ],
+                }
+            ],
+            list(response_body["lists"]["all"]),
+        )
+        self.assertListEqual(
+            list(response_body["lists"]["dms"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [invited_dm_room_id, joined_dm_room_id],
+                }
+            ],
+            list(response_body["lists"]["dms"]),
+        )
+        self.assertListEqual(
+            list(response_body["lists"]["non-dms"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [invite_room_id, room_id],
+                }
+            ],
+            list(response_body["lists"]["non-dms"]),
+        )
+        self.assertListEqual(
+            list(response_body["lists"]["room-invites"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [invite_room_id],
+                }
+            ],
+            list(response_body["lists"]["room-invites"]),
+        )
+
+        # Ensure DMs are correctly marked
+        self.assertDictEqual(
+            {
+                room_id: room.get("is_dm")
+                for room_id, room in response_body["rooms"].items()
+            },
+            {
+                invite_room_id: None,
+                room_id: None,
+                invited_dm_room_id: True,
+                joined_dm_room_id: True,
+            },
+        )
+
+    def test_filter_regardless_of_membership_server_left_room(self) -> None:
+        """
+        Test that filters apply to rooms regardless of membership. We're also
+        compounding the problem by having all of the local users leave the room,
+        causing our server to leave the room.
+ + We want to make sure that if someone is filtering rooms, and leaves, you still + get that final update down sync that you left. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create an encrypted space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + self.helper.send_state( + space_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + # Make an initial Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + from_token = channel.json_body["pos"] + + # Make sure the response has the lists we requested + self.assertListEqual( + list(channel.json_body["lists"].keys()), + ["all-list", "foo-list"], + channel.json_body["lists"].keys(), + ) + + # Make sure the lists have the correct rooms + self.assertListEqual( + list(channel.json_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(channel.json_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + + # Everyone leaves the encrypted space room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make an incremental Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={from_token}", + { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make sure the lists have the correct rooms even though we `newly_left` + self.assertListEqual( + list(channel.json_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(channel.json_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + + def test_sort_list(self) -> None: + """ + Test that the `lists` are sorted by `stream_ordering` + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, 
is_public=True)
+
+        # Activity that will order the rooms
+        self.helper.send(room_id3, "activity in room3", tok=user1_tok)
+        self.helper.send(room_id1, "activity in room1", tok=user1_tok)
+        self.helper.send(room_id2, "activity in room2", tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [room_id2, room_id1, room_id3],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+    def test_sliced_windows(self) -> None:
+        """
+        Test that the `lists` `ranges` are sliced correctly. Both sides of each range
+        are inclusive.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+        # Make the Sliding Sync request for a single room
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 0],
+                    "room_ids": [room_id3],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+        # Make the Sliding Sync request for the first two rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id3, room_id2],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+    def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
+        """
+        Test that rooms with no updates are not returned in subsequent incremental
+        syncs.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Nothing has happened in the room, so the room should not come down
+        # /sync.
+ self.assertIsNone(response_body["rooms"].get(room_id1)) + + def test_empty_initial_room_comes_down_sync(self) -> None: + """ + Test that rooms come down /sync even with empty required state and + timeline limit in initial sync. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + + # Make the Sliding Sync request + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 53f8ae7ece..63df31ec75 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -20,8 +20,7 @@ # import json import logging -from http import HTTPStatus -from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple +from typing import List from parameterized import parameterized, parameterized_class @@ -29,48 +28,21 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( - AccountDataTypes, - EduTypes, EventContentFields, EventTypes, - HistoryVisibility, - Membership, ReceiptTypes, RelationTypes, - RoomTypes, -) -from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase -from synapse.handlers.sliding_sync import StateValues -from synapse.rest.client import ( - devices, - knock, - login, - read_marker, - receipts, - room, - sendtodevice, - sync, ) +from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync from synapse.server import HomeServer -from synapse.types import ( - JsonDict, - RoomStreamToken, - SlidingSyncStreamToken, - StreamKeyType, - StreamToken, - UserID, -) -from synapse.types.handlers import SlidingSyncConfig +from synapse.types import JsonDict from synapse.util import Clock -from synapse.util.stringutils import random_string from tests import unittest from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) from tests.server import TimedOutException -from tests.test_utils.event_injection import create_event, mark_event_as_partial_state logger = logging.getLogger(__name__) @@ -1234,5832 +1206,3 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase): self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"]) self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"]) - - -class SlidingSyncBase(unittest.HomeserverTestCase): - """Base class for sliding sync test cases""" - - sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" - - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - - def do_sync( - self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str - ) -> Tuple[JsonDict, str]: - """Do a sliding sync request with given body. - - Asserts the request was successful. - - Attributes: - sync_body: The full request body to use - since: Optional since token - tok: Access token to use - - Returns: - A tuple of the response body and the `pos` field. 
- """ - - sync_path = self.sync_endpoint - if since: - sync_path += f"?pos={since}" - - channel = self.make_request( - method="POST", - path=sync_path, - content=sync_body, - access_token=tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - return channel.json_body, channel.json_body["pos"] - - def _bump_notifier_wait_for_events( - self, - user_id: str, - wake_stream_key: Literal[ - StreamKeyType.ACCOUNT_DATA, - StreamKeyType.PRESENCE, - ], - ) -> None: - """ - Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding - Sync results. - - Args: - user_id: The user ID to wake up the notifier for - wake_stream_key: The stream key to wake up. This will create an actual new - entity in that stream so it's best to choose one that won't affect the - Sliding Sync results you're testing for. In other words, if your testing - account data, choose `StreamKeyType.PRESENCE` instead. We support two - possible stream keys because you're probably testing one or the other so - one is always a "safe" option. - """ - # We're expecting some new activity from this point onwards - from_token = self.hs.get_event_sources().get_current_token() - - triggered_notifier_wait_for_events = False - - async def _on_new_acivity( - before_token: StreamToken, after_token: StreamToken - ) -> bool: - nonlocal triggered_notifier_wait_for_events - triggered_notifier_wait_for_events = True - return True - - notifier = self.hs.get_notifier() - - # Listen for some new activity for the user. We're just trying to confirm that - # our bump below actually does what we think it does (triggers new activity for - # the user). - result_awaitable = notifier.wait_for_events( - user_id, - 1000, - _on_new_acivity, - from_token=from_token, - ) - - # Update the account data or presence so that `notifier.wait_for_events(...)` - # wakes up. We chose these two options because they're least likely to show up - # in the Sliding Sync response so it won't affect whether we have results. - if wake_stream_key == StreamKeyType.ACCOUNT_DATA: - self.get_success( - self.hs.get_account_data_handler().add_account_data_for_user( - user_id, - "org.matrix.foobarbaz", - {"foo": "bar"}, - ) - ) - elif wake_stream_key == StreamKeyType.PRESENCE: - sending_user_id = self.register_user( - "user_bump_notifier_wait_for_events_" + random_string(10), "pass" - ) - sending_user_tok = self.login(sending_user_id, "pass") - test_msg = {"foo": "bar"} - chan = self.make_request( - "PUT", - "/_matrix/client/r0/sendToDevice/m.test/1234", - content={"messages": {user_id: {"d1": test_msg}}}, - access_token=sending_user_tok, - ) - self.assertEqual(chan.code, 200, chan.result) - else: - raise AssertionError( - "Unable to wake that stream in _bump_notifier_wait_for_events(...)" - ) - - # Wait for our notifier result - self.get_success(result_awaitable) - - if not triggered_notifier_wait_for_events: - raise AssertionError( - "Expected `notifier.wait_for_events(...)` to be triggered" - ) - - -class SlidingSyncTestCase(SlidingSyncBase): - """ - Tests regarding MSC3575 Sliding Sync `/sync` endpoint. 
- """ - - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - room.register_servlets, - sync.register_servlets, - devices.register_servlets, - receipts.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.event_sources = hs.get_event_sources() - self.storage_controllers = hs.get_storage_controllers() - self.account_data_handler = hs.get_account_data_handler() - - def _assertRequiredStateIncludes( - self, - actual_required_state: Any, - expected_state_events: Iterable[EventBase], - exact: bool = False, - ) -> None: - """ - Wrapper around `assertIncludes` to give slightly better looking diff error - messages that include some context "$event_id (type, state_key)". - - Args: - actual_required_state: The "required_state" of a room from a Sliding Sync - request response. - expected_state_events: The expected state events to be included in the - `actual_required_state`. - exact: Whether the actual state should be exactly equal to the expected - state (no extras). - """ - - assert isinstance(actual_required_state, list) - for event in actual_required_state: - assert isinstance(event, dict) - - self.assertIncludes( - { - f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")' - for event in actual_required_state - }, - { - f'{event.event_id} ("{event.type}", "{event.state_key}")' - for event in expected_state_events - }, - exact=exact, - # Message to help understand the diff in context - message=str(actual_required_state), - ) - - def _add_new_dm_to_global_account_data( - self, source_user_id: str, target_user_id: str, target_room_id: str - ) -> None: - """ - Helper to handle inserting a new DM for the source user into global account data - (handles all of the list merging). - - Args: - source_user_id: The user ID of the DM mapping we're going to update - target_user_id: User ID of the person the DM is with - target_room_id: Room ID of the DM - """ - - # Get the current DM map - existing_dm_map = self.get_success( - self.store.get_global_account_data_by_type_for_user( - source_user_id, AccountDataTypes.DIRECT - ) - ) - # Scrutinize the account data since it has no concrete type. We're just copying - # everything into a known type. It should be a mapping from user ID to a list of - # room IDs. Ignore anything else. - new_dm_map: Dict[str, List[str]] = {} - if isinstance(existing_dm_map, dict): - for user_id, room_ids in existing_dm_map.items(): - if isinstance(user_id, str) and isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - new_dm_map[user_id] = new_dm_map.get(user_id, []) + [ - room_id - ] - - # Add the new DM to the map - new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [ - target_room_id - ] - # Save the DM map to global account data - self.get_success( - self.store.add_account_data_for_user( - source_user_id, - AccountDataTypes.DIRECT, - new_dm_map, - ) - ) - - def _create_dm_room( - self, - inviter_user_id: str, - inviter_tok: str, - invitee_user_id: str, - invitee_tok: str, - should_join_room: bool = True, - ) -> str: - """ - Helper to create a DM room as the "inviter" and invite the "invitee" user to the - room. The "invitee" user also will join the room. The `m.direct` account data - will be set for both users. 
- """ - - # Create a room and send an invite the other user - room_id = self.helper.create_room_as( - inviter_user_id, - is_public=False, - tok=inviter_tok, - ) - self.helper.invite( - room_id, - src=inviter_user_id, - targ=invitee_user_id, - tok=inviter_tok, - extra_data={"is_direct": True}, - ) - if should_join_room: - # Person that was invited joins the room - self.helper.join(room_id, invitee_user_id, tok=invitee_tok) - - # Mimic the client setting the room as a direct message in the global account - # data for both users. - self._add_new_dm_to_global_account_data( - invitee_user_id, inviter_user_id, room_id - ) - self._add_new_dm_to_global_account_data( - inviter_user_id, invitee_user_id, room_id - ) - - return room_id - - def test_sync_list(self) -> None: - """ - Test that room IDs show up in the Sliding Sync `lists` - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], - response_body["lists"].keys(), - ) - - # Make sure the list includes the room we are joined to - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [room_id], - } - ], - response_body["lists"]["foo-list"], - ) - - def test_wait_for_sync_token(self) -> None: - """ - Test that worker will wait until it catches up to the given token - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a future token that will cause us to wait. Since we never send a new - # event to reach that future stream_ordering, the worker will wait until the - # full timeout. - stream_id_gen = self.store.get_events_stream_id_generator() - stream_id = self.get_success(stream_id_gen.get_next().__aenter__()) - current_token = self.event_sources.get_current_token() - future_position_token = current_token.copy_and_replace( - StreamKeyType.ROOM, - RoomStreamToken(stream=stream_id), - ) - - future_position_token_serialized = self.get_success( - SlidingSyncStreamToken(future_position_token, 0).to_string(self.store) - ) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } - } - } - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={future_position_token_serialized}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)` - # timeout - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=9900) - channel.await_result(timeout_ms=200) - self.assertEqual(channel.code, 200, channel.json_body) - - # We expect the next `pos` in the result to be the same as what we requested - # with because we weren't able to find anything new yet. 
- self.assertEqual(channel.json_body["pos"], future_position_token_serialized) - - def test_wait_for_new_data(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive. - - (Only applies to incremental syncs with a `timeout` specified) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": 1, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Bump the room with new events to trigger new results - event_response1 = self.helper.send( - room_id, "new activity in room", tok=user1_tok - ) - # Should respond before the 10 second timeout - channel.await_result(timeout_ms=3000) - self.assertEqual(channel.code, 200, channel.json_body) - - # Check to make sure the new event is returned - self.assertEqual( - [ - event["event_id"] - for event in channel.json_body["rooms"][room_id]["timeline"] - ], - [ - event_response1["event_id"], - ], - channel.json_body["rooms"][room_id]["timeline"], - ) - - def test_wait_for_new_data_timeout(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive but - no data ever arrives so we timeout. We're also making sure that the default data - doesn't trigger a false-positive for new data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": 1, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Wake-up `notifier.wait_for_events(...)` that will cause us test - # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events( - user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA - ) - # Block for a little bit more to ensure we don't see any new results. - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=4000) - # Wait for the sync to complete (wait for the rest of the 10 second timeout, - # 5000 + 4000 + 1200 > 10000) - channel.await_result(timeout_ms=1200) - self.assertEqual(channel.code, 200, channel.json_body) - - # There should be no room sent down. 
- self.assertFalse(channel.json_body["rooms"]) - - def test_filter_list(self) -> None: - """ - Test that filters apply to `lists` - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a DM room - joined_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=True, - ) - invited_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=False, - ) - - # Create a normal room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - # Absense of filters does not imply "False" values - "all": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {}, - }, - # Test single truthy filter - "dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": True}, - }, - # Test single falsy filter - "non-dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False}, - }, - # Test how multiple filters should stack (AND'd together) - "room-invites": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False, "is_invite": True}, - }, - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["all", "dms", "non-dms", "room-invites"], - response_body["lists"].keys(), - ) - - # Make sure the lists have the correct rooms - self.assertListEqual( - list(response_body["lists"]["all"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [ - invite_room_id, - room_id, - invited_dm_room_id, - joined_dm_room_id, - ], - } - ], - list(response_body["lists"]["all"]), - ) - self.assertListEqual( - list(response_body["lists"]["dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invited_dm_room_id, joined_dm_room_id], - } - ], - list(response_body["lists"]["dms"]), - ) - self.assertListEqual( - list(response_body["lists"]["non-dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id, room_id], - } - ], - list(response_body["lists"]["non-dms"]), - ) - self.assertListEqual( - list(response_body["lists"]["room-invites"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id], - } - ], - list(response_body["lists"]["room-invites"]), - ) - - # Ensure DM's are correctly marked - self.assertDictEqual( - { - room_id: room.get("is_dm") - for room_id, room in response_body["rooms"].items() - }, - { - invite_room_id: None, - room_id: None, - invited_dm_room_id: True, - joined_dm_room_id: True, - }, - ) - - def test_filter_regardless_of_membership_server_left_room(self) -> None: - """ - Test that filters apply to rooms regardless of membership. We're also - compounding the problem by having all of the local users leave the room causing - our server to leave the room. 
- - We want to make sure that if someone is filtering rooms, and leaves, you still - get that final update down sync that you left. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create an encrypted space room - space_room_id = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - self.helper.send_state( - space_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user2_tok, - ) - self.helper.join(space_room_id, user1_id, tok=user1_tok) - - # Make an initial Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] - - # Make sure the response has the lists we requested - self.assertListEqual( - list(channel.json_body["lists"].keys()), - ["all-list", "foo-list"], - channel.json_body["lists"].keys(), - ) - - # Make sure the lists have the correct rooms - self.assertListEqual( - list(channel.json_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], - ) - self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], - ) - - # Everyone leaves the encrypted space room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - self.helper.leave(space_room_id, user2_id, tok=user2_tok) - - # Make an incremental Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Make sure the lists have the correct rooms even though we `newly_left` - self.assertListEqual( - list(channel.json_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], - ) - self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], - ) - - def test_sort_list(self) -> None: - """ - Test that the `lists` are sorted by `stream_ordering` - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, 
is_public=True) - - # Activity that will order the rooms - self.helper.send(room_id3, "activity in room3", tok=user1_tok) - self.helper.send(room_id1, "activity in room1", tok=user1_tok) - self.helper.send(room_id2, "activity in room2", tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [ - ["m.room.join_rules", ""], - ["m.room.history_visibility", ""], - ["m.space.child", "*"], - ], - "timeline_limit": 1, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], - response_body["lists"].keys(), - ) - - # Make sure the list is sorted in the way we expect - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [room_id2, room_id1, room_id3], - } - ], - response_body["lists"]["foo-list"], - ) - - def test_sliced_windows(self) -> None: - """ - Test that the `lists` `ranges` are sliced correctly. Both sides of each range - are inclusive. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) - - # Make the Sliding Sync request for a single room - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": 1, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], - response_body["lists"].keys(), - ) - # Make sure the list is sorted in the way we expect - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 0], - "room_ids": [room_id3], - } - ], - response_body["lists"]["foo-list"], - ) - - # Make the Sliding Sync request for the first two rooms - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 1, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], - response_body["lists"].keys(), - ) - # Make sure the list is sorted in the way we expect - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id3, room_id2], - } - ], - response_body["lists"]["foo-list"], - ) - - def test_rooms_meta_when_joined(self) -> None: - """ - Test that the `rooms` `name` and `avatar` are included in the response and - reflect the current state of the room when the user is joined to the room. 
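# --- Editor's aside (begin): an illustrative sketch of the ordering and
# windowing behaviour that `test_sort_list` and `test_sliced_windows` above
# rely on; this is not Synapse's code. `last_activity` maps room IDs to a
# hypothetical latest `stream_ordering`; lists sort newest-first and the
# requested `ranges` are inclusive on both ends.
def window(last_activity: dict, range_: tuple) -> list:
    ordered = sorted(last_activity, key=lambda room_id: last_activity[room_id], reverse=True)
    start, end = range_
    return ordered[start : end + 1]  # both ends inclusive, hence the +1

last_activity = {"!room1": 11, "!room2": 12, "!room3": 10}
assert window(last_activity, (0, 99)) == ["!room2", "!room1", "!room3"]
assert window(last_activity, (0, 0)) == ["!room2"]  # a single-element window
assert window(last_activity, (0, 1)) == ["!room2", "!room1"]
# --- Editor's aside (end) ---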
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "name": "my super room", - }, - ) - # Set the room avatar URL - self.helper.send_state( - room_id1, - EventTypes.RoomAvatar, - {"url": "mxc://DUMMY_MEDIA_ID"}, - tok=user2_tok, - ) - - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Reflect the current state of the room - self.assertEqual( - response_body["rooms"][room_id1]["name"], - "my super room", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["avatar"], - "mxc://DUMMY_MEDIA_ID", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - 2, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 0, - ) - self.assertIsNone( - response_body["rooms"][room_id1].get("is_dm"), - ) - - def test_rooms_meta_when_invited(self) -> None: - """ - Test that the `rooms` `name` and `avatar` are included in the response and - reflect the current state of the room when the user is invited to the room. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "name": "my super room", - }, - ) - # Set the room avatar URL - self.helper.send_state( - room_id1, - EventTypes.RoomAvatar, - {"url": "mxc://DUMMY_MEDIA_ID"}, - tok=user2_tok, - ) - - # User1 is invited to the room - self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - - # Update the room name after user1 has left - self.helper.send_state( - room_id1, - EventTypes.Name, - {"name": "my super duper room"}, - tok=user2_tok, - ) - # Update the room avatar URL after user1 has left - self.helper.send_state( - room_id1, - EventTypes.RoomAvatar, - {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, - tok=user2_tok, - ) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # This should still reflect the current state of the room even when the user is - # invited. - self.assertEqual( - response_body["rooms"][room_id1]["name"], - "my super duper room", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["avatar"], - "mxc://UPDATED_DUMMY_MEDIA_ID", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - 1, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 1, - ) - self.assertIsNone( - response_body["rooms"][room_id1].get("is_dm"), - ) - - def test_rooms_meta_when_banned(self) -> None: - """ - Test that the `rooms` `name` and `avatar` reflect the state of the room when the - user was banned (do not leak current state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "name": "my super room", - }, - ) - # Set the room avatar URL - self.helper.send_state( - room_id1, - EventTypes.RoomAvatar, - {"url": "mxc://DUMMY_MEDIA_ID"}, - tok=user2_tok, - ) - - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - - # Update the room name after user1 has left - self.helper.send_state( - room_id1, - EventTypes.Name, - {"name": "my super duper room"}, - tok=user2_tok, - ) - # Update the room avatar URL after user1 has left - self.helper.send_state( - room_id1, - EventTypes.RoomAvatar, - {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"}, - tok=user2_tok, - ) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Reflect the state of the room at the time of leaving - self.assertEqual( - response_body["rooms"][room_id1]["name"], - "my super room", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["avatar"], - "mxc://DUMMY_MEDIA_ID", - response_body["rooms"][room_id1], - ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - # FIXME: The actual number should be "1" (user2) but we currently don't - # support this for rooms where the user has left/been banned. - 0, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 0, - ) - self.assertIsNone( - response_body["rooms"][room_id1].get("is_dm"), - ) - - def test_rooms_meta_heroes(self) -> None: - """ - Test that the `rooms` `heroes` are included in the response when the room - doesn't have a room name set. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - _user3_tok = self.login(user3_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "name": "my super room", - }, - ) - self.helper.join(room_id1, user1_id, tok=user1_tok) - # User3 is invited - self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) - - room_id2 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - # No room name set so that `heroes` is populated - # - # "name": "my super room2", - }, - ) - self.helper.join(room_id2, user1_id, tok=user1_tok) - # User3 is invited - self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Room1 has a name so we shouldn't see any `heroes` which the client would use - # the calculate the room name themselves. 
- self.assertEqual( - response_body["rooms"][room_id1]["name"], - "my super room", - response_body["rooms"][room_id1], - ) - self.assertIsNone(response_body["rooms"][room_id1].get("heroes")) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - 2, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 1, - ) - - # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(response_body["rooms"][room_id2].get("name")) - self.assertCountEqual( - [ - hero["user_id"] - for hero in response_body["rooms"][room_id2].get("heroes", []) - ], - # Heroes shouldn't include the user themselves (we shouldn't see user1) - [user2_id, user3_id], - ) - self.assertEqual( - response_body["rooms"][room_id2]["joined_count"], - 2, - ) - self.assertEqual( - response_body["rooms"][room_id2]["invited_count"], - 1, - ) - - # We didn't request any state so we shouldn't see any `required_state` - self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) - self.assertIsNone(response_body["rooms"][room_id2].get("required_state")) - - def test_rooms_meta_heroes_max(self) -> None: - """ - Test that the `rooms` `heroes` only includes the first 5 users (not including - yourself). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass") - user4_id = self.register_user("user4", "pass") - user4_tok = self.login(user4_id, "pass") - user5_id = self.register_user("user5", "pass") - user5_tok = self.login(user5_id, "pass") - user6_id = self.register_user("user6", "pass") - user6_tok = self.login(user6_id, "pass") - user7_id = self.register_user("user7", "pass") - user7_tok = self.login(user7_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - # No room name set so that `heroes` is populated - # - # "name": "my super room", - }, - ) - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user3_id, tok=user3_tok) - self.helper.join(room_id1, user4_id, tok=user4_tok) - self.helper.join(room_id1, user5_id, tok=user5_tok) - self.helper.join(room_id1, user6_id, tok=user6_tok) - self.helper.join(room_id1, user7_id, tok=user7_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(response_body["rooms"][room_id1].get("name")) - self.assertCountEqual( - [ - hero["user_id"] - for hero in response_body["rooms"][room_id1].get("heroes", []) - ], - # Heroes should be the first 5 users in the room (excluding the user - # themselves, we shouldn't see `user1`) - [user2_id, user3_id, user4_id, user5_id, user6_id], - ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - 7, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 0, - ) - - # We didn't request any state so we shouldn't see any `required_state` - self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) - - def test_rooms_meta_heroes_when_banned(self) -> None: - """ - Test that the `rooms` `heroes` are included in the response when the room - doesn't have a room name set but doesn't leak 
information past their ban. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - _user3_tok = self.login(user3_id, "pass") - user4_id = self.register_user("user4", "pass") - user4_tok = self.login(user4_id, "pass") - user5_id = self.register_user("user5", "pass") - _user5_tok = self.login(user5_id, "pass") - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - # No room name set so that `heroes` is populated - # - # "name": "my super room", - }, - ) - # User1 joins the room - self.helper.join(room_id1, user1_id, tok=user1_tok) - # User3 is invited - self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) - - # User1 is banned from the room - self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - - # User4 joins the room after user1 is banned - self.helper.join(room_id1, user4_id, tok=user4_tok) - # User5 is invited after user1 is banned - self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Room2 doesn't have a name so we should see `heroes` populated - self.assertIsNone(response_body["rooms"][room_id1].get("name")) - self.assertCountEqual( - [ - hero["user_id"] - for hero in response_body["rooms"][room_id1].get("heroes", []) - ], - # Heroes shouldn't include the user themselves (we shouldn't see user1). We - # also shouldn't see user4 since they joined after user1 was banned. - # - # FIXME: The actual result should be `[user2_id, user3_id]` but we currently - # don't support this for rooms where the user has left/been banned. - [], - ) - - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - # FIXME: The actual number should be "1" (user2) but we currently don't - # support this for rooms where the user has left/been banned. - 0, - ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - # We shouldn't see user5 since they were invited after user1 was banned. - # - # FIXME: The actual number should be "1" (user3) but we currently don't - # support this for rooms where the user has left/been banned. - 0, - ) - - def test_rooms_limited_initial_sync(self) -> None: - """ - Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit` - on initial sync. 
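# --- Editor's aside (begin): a rough sketch of the hero-selection rule the
# `test_rooms_meta_heroes*` tests above exercise: at most five other members,
# never the syncing user themselves. Hypothetical helper, not Synapse's code.
def pick_heroes(member_ids: list, me: str, limit: int = 5) -> list:
    """Return up to `limit` members other than ourselves, in membership order."""
    return [m for m in member_ids if m != me][:limit]

members = ["@user2:test", "@user1:test", "@user3:test", "@user4:test",
           "@user5:test", "@user6:test", "@user7:test"]
assert pick_heroes(members, me="@user1:test") == [
    "@user2:test", "@user3:test", "@user4:test", "@user5:test", "@user6:test"
]
# --- Editor's aside (end) ---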
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity1", tok=user2_tok) - self.helper.send(room_id1, "activity2", tok=user2_tok) - event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok) - event_pos3 = self.get_success( - self.store.get_position_for_event(event_response3["event_id"]) - ) - event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok) - event_pos4 = self.get_success( - self.store.get_position_for_event(event_response4["event_id"]) - ) - event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok) - user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room) - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - True, - response_body["rooms"][room_id1], - ) - # Check to make sure the latest events are returned - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - event_response4["event_id"], - event_response5["event_id"], - user1_join_response["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - - # Check to make sure the `prev_batch` points at the right place - prev_batch_token = self.get_success( - StreamToken.from_string( - self.store, response_body["rooms"][room_id1]["prev_batch"] - ) - ) - prev_batch_room_stream_token_serialized = self.get_success( - prev_batch_token.room_key.to_string(self.store) - ) - # If we use the `prev_batch` token to look backwards, we should see `event3` - # next so make sure the token encompasses it - self.assertEqual( - event_pos3.persisted_after(prev_batch_token.room_key), - False, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}", - ) - # If we use the `prev_batch` token to look backwards, we shouldn't see `event4` - # anymore since it was just returned in this response. - self.assertEqual( - event_pos4.persisted_after(prev_batch_token.room_key), - True, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}", - ) - - # With no `from_token` (initial sync), it's all historical since there is no - # "live" range - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 0, - response_body["rooms"][room_id1], - ) - - def test_rooms_not_limited_initial_sync(self) -> None: - """ - Test that we mark `rooms` as `limited=False` when there are no more events to - paginate to. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity1", tok=user2_tok) - self.helper.send(room_id1, "activity2", tok=user2_tok) - self.helper.send(room_id1, "activity3", tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - timeline_limit = 100 - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # The timeline should be `limited=False` because we have all of the events (no - # more to paginate to) - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - False, - response_body["rooms"][room_id1], - ) - expected_number_of_events = 9 - # We're just looking to make sure we got all of the events before hitting the `timeline_limit` - self.assertEqual( - len(response_body["rooms"][room_id1]["timeline"]), - expected_number_of_events, - response_body["rooms"][room_id1]["timeline"], - ) - self.assertLessEqual(expected_number_of_events, timeline_limit) - - # With no `from_token` (initial sync), it's all historical since there is no - # "live" token range. - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 0, - response_body["rooms"][room_id1], - ) - - def test_rooms_incremental_sync(self) -> None: - """ - Test `rooms` data during an incremental sync after an initial sync. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok) - - # Make an initial Sliding Sync request to grab a token. This is also a sanity - # check that we can go from initial to incremental sync. - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Send some events but don't send enough to saturate the `timeline_limit`. - # We want to later test that we only get the new events since the `next_pos` - event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok) - event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) - - # Make an incremental Sliding Sync request (what we're trying to test) - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # We only expect to see the new events since the last sync which isn't enough to - # fill up the `timeline_limit`. - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - False, - f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} ' - + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. 
' - + str(response_body["rooms"][room_id1]), - ) - # Check to make sure the latest events are returned - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - event_response2["event_id"], - event_response3["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - - # All events are "live" - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 2, - response_body["rooms"][room_id1], - ) - - def test_rooms_bump_stamp(self) -> None: - """ - Test that `bump_stamp` is present and pointing to relevant events. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id1 = self.helper.create_room_as( - user1_id, - tok=user1_tok, - ) - event_response1 = message_response = self.helper.send( - room_id1, "message in room1", tok=user1_tok - ) - event_pos1 = self.get_success( - self.store.get_position_for_event(event_response1["event_id"]) - ) - room_id2 = self.helper.create_room_as( - user1_id, - tok=user1_tok, - ) - send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok) - event_pos2 = self.get_success( - self.store.get_position_for_event(send_response2["event_id"]) - ) - - # Send a reaction in room1 but it shouldn't affect the `bump_stamp` - # because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES` - self.helper.send_event( - room_id1, - type=EventTypes.Reaction, - content={ - "m.relates_to": { - "event_id": message_response["event_id"], - "key": "👍", - "rel_type": "m.annotation", - } - }, - tok=user1_tok, - ) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 100, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], - response_body["lists"].keys(), - ) - - # Make sure the list includes the rooms in the right order - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - # room1 sorts before room2 because it has the latest event (the - # reaction) - "room_ids": [room_id1, room_id2], - } - ], - response_body["lists"]["foo-list"], - ) - - # The `bump_stamp` for room1 should point at the latest message (not the - # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) - self.assertEqual( - response_body["rooms"][room_id1]["bump_stamp"], - event_pos1.stream, - response_body["rooms"][room_id1], - ) - - # The `bump_stamp` for room2 should point at the latest message - self.assertEqual( - response_body["rooms"][room_id2]["bump_stamp"], - event_pos2.stream, - response_body["rooms"][room_id2], - ) - - def test_rooms_bump_stamp_backfill(self) -> None: - """ - Test that `bump_stamp` ignores backfilled events, i.e. events with a - negative stream ordering. 
- """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote room - creator = "@user:other" - room_id = "!foo:other" - shared_kwargs = { - "room_id": room_id, - "room_version": "10", - } - - create_tuple = self.get_success( - create_event( - self.hs, - prev_event_ids=[], - type=EventTypes.Create, - state_key="", - sender=creator, - **shared_kwargs, - ) - ) - creator_tuple = self.get_success( - create_event( - self.hs, - prev_event_ids=[create_tuple[0].event_id], - auth_event_ids=[create_tuple[0].event_id], - type=EventTypes.Member, - state_key=creator, - content={"membership": Membership.JOIN}, - sender=creator, - **shared_kwargs, - ) - ) - # We add a message event as a valid "bump type" - msg_tuple = self.get_success( - create_event( - self.hs, - prev_event_ids=[creator_tuple[0].event_id], - auth_event_ids=[create_tuple[0].event_id], - type=EventTypes.Message, - content={"body": "foo", "msgtype": "m.text"}, - sender=creator, - **shared_kwargs, - ) - ) - invite_tuple = self.get_success( - create_event( - self.hs, - prev_event_ids=[msg_tuple[0].event_id], - auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], - type=EventTypes.Member, - state_key=user1_id, - content={"membership": Membership.INVITE}, - sender=creator, - **shared_kwargs, - ) - ) - - remote_events_and_contexts = [ - create_tuple, - creator_tuple, - msg_tuple, - invite_tuple, - ] - - # Ensure the local HS knows the room version - self.get_success( - self.store.store_room(room_id, creator, False, RoomVersions.V10) - ) - - # Persist these events as backfilled events. - persistence = self.hs.get_storage_controllers().persistence - assert persistence is not None - - for event, context in remote_events_and_contexts: - self.get_success(persistence.persist_event(event, context, backfilled=True)) - - # Now we join the local user to the room - join_tuple = self.get_success( - create_event( - self.hs, - prev_event_ids=[invite_tuple[0].event_id], - auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], - type=EventTypes.Member, - state_key=user1_id, - content={"membership": Membership.JOIN}, - sender=user1_id, - **shared_kwargs, - ) - ) - self.get_success(persistence.persist_event(*join_tuple)) - - # Doing an SS request should return a positive `bump_stamp`, even though - # the only event that matches the bump types has as negative stream - # ordering. - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 5, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) - - def test_rooms_newly_joined_incremental_sync(self) -> None: - """ - Test that when we make an incremental sync with a `newly_joined` `rooms`, we are - able to see some historical events before the `from_token`. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity before token1", tok=user2_tok) - event_response2 = self.helper.send( - room_id1, "activity before token2", tok=user2_tok - ) - - # The `timeline_limit` is set to 4 so we can at least see one historical event - # before the `from_token`. We should see historical events because this is a - # `newly_joined` room. 
- timeline_limit = 4 - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Join the room after the `from_token` which will make us consider this room as - # `newly_joined`. - user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Send some events but don't send enough to saturate the `timeline_limit`. - # We want to later test that we only get the new events since the `next_pos` - event_response3 = self.helper.send( - room_id1, "activity after token3", tok=user2_tok - ) - event_response4 = self.helper.send( - room_id1, "activity after token4", tok=user2_tok - ) - - # Make an incremental Sliding Sync request (what we're trying to test) - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # We should see the new events and the rest should be filled with historical - # events which will make us `limited=True` since there are more to paginate to. - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - True, - f"Our `timeline_limit` was {timeline_limit} " - + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' - + str(response_body["rooms"][room_id1]), - ) - # Check to make sure that the "live" and historical events are returned - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - event_response2["event_id"], - user1_join_response["event_id"], - event_response3["event_id"], - event_response4["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - - # Only events after the `from_token` are "live" (join, event3, event4) - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 3, - response_body["rooms"][room_id1], - ) - - def test_rooms_invite_shared_history_initial_sync(self) -> None: - """ - Test that `rooms` we are invited to have some stripped `invite_state` during an - initial sync. - - This is an `invite` room so we should only have `stripped_state` (no `timeline`) - but we also shouldn't see any timeline events because the history visiblity is - `shared` and we haven't joined the room yet. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user1 = UserID.from_string(user1_id) - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user2 = UserID.from_string(user2_id) - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - # Ensure we're testing with a room with `shared` history visibility which means - # history visible until you actually join the room. 
- history_visibility_response = self.helper.get_state( - room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok - ) - self.assertEqual( - history_visibility_response.get("history_visibility"), - HistoryVisibility.SHARED, - ) - - self.helper.send(room_id1, "activity before1", tok=user2_tok) - self.helper.send(room_id1, "activity before2", tok=user2_tok) - self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.send(room_id1, "activity after3", tok=user2_tok) - self.helper.send(room_id1, "activity after4", tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # `timeline` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("timeline"), - response_body["rooms"][room_id1], - ) - # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("num_live"), - response_body["rooms"][room_id1], - ) - # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("limited"), - response_body["rooms"][room_id1], - ) - # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("prev_batch"), - response_body["rooms"][room_id1], - ) - # `required_state` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("required_state"), - response_body["rooms"][room_id1], - ) - # We should have some `stripped_state` so the potential joiner can identify the - # room (we don't care about the order). - self.assertCountEqual( - response_body["rooms"][room_id1]["invite_state"], - [ - { - "content": {"creator": user2_id, "room_version": "10"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.create", - }, - { - "content": {"join_rule": "public"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.join_rules", - }, - { - "content": {"displayname": user2.localpart, "membership": "join"}, - "sender": user2_id, - "state_key": user2_id, - "type": "m.room.member", - }, - { - "content": {"displayname": user1.localpart, "membership": "invite"}, - "sender": user2_id, - "state_key": user1_id, - "type": "m.room.member", - }, - ], - response_body["rooms"][room_id1]["invite_state"], - ) - - def test_rooms_invite_shared_history_incremental_sync(self) -> None: - """ - Test that `rooms` we are invited to have some stripped `invite_state` during an - incremental sync. - - This is an `invite` room so we should only have `stripped_state` (no `timeline`) - but we also shouldn't see any timeline events because the history visiblity is - `shared` and we haven't joined the room yet. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user1 = UserID.from_string(user1_id) - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user2 = UserID.from_string(user2_id) - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - # Ensure we're testing with a room with `shared` history visibility which means - # history visible until you actually join the room. 
- history_visibility_response = self.helper.get_state( - room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok - ) - self.assertEqual( - history_visibility_response.get("history_visibility"), - HistoryVisibility.SHARED, - ) - - self.helper.send(room_id1, "activity before invite1", tok=user2_tok) - self.helper.send(room_id1, "activity before invite2", tok=user2_tok) - self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.send(room_id1, "activity after invite3", tok=user2_tok) - self.helper.send(room_id1, "activity after invite4", tok=user2_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - self.helper.send(room_id1, "activity after token5", tok=user2_tok) - self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) - - # Make the Sliding Sync request - response_body, from_token = self.do_sync( - sync_body, since=from_token, tok=user1_tok - ) - - # `timeline` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("timeline"), - response_body["rooms"][room_id1], - ) - # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("num_live"), - response_body["rooms"][room_id1], - ) - # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("limited"), - response_body["rooms"][room_id1], - ) - # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("prev_batch"), - response_body["rooms"][room_id1], - ) - # `required_state` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("required_state"), - response_body["rooms"][room_id1], - ) - # We should have some `stripped_state` so the potential joiner can identify the - # room (we don't care about the order). - self.assertCountEqual( - response_body["rooms"][room_id1]["invite_state"], - [ - { - "content": {"creator": user2_id, "room_version": "10"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.create", - }, - { - "content": {"join_rule": "public"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.join_rules", - }, - { - "content": {"displayname": user2.localpart, "membership": "join"}, - "sender": user2_id, - "state_key": user2_id, - "type": "m.room.member", - }, - { - "content": {"displayname": user1.localpart, "membership": "invite"}, - "sender": user2_id, - "state_key": user1_id, - "type": "m.room.member", - }, - ], - response_body["rooms"][room_id1]["invite_state"], - ) - - def test_rooms_invite_world_readable_history_initial_sync(self) -> None: - """ - Test that `rooms` we are invited to have some stripped `invite_state` during an - initial sync. - - This is an `invite` room so we should only have `stripped_state` (no `timeline`) - but depending on the semantics we decide, we could potentially see some - historical events before/after the `from_token` because the history is - `world_readable`. Same situation for events after the `from_token` if the - history visibility was set to `invited`. 
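# --- Editor's aside (begin): the shape of the stripped `invite_state` entries
# asserted in the invite tests above. A "stripped" state event keeps only the
# fields the invitee needs to render the room; notably there is no `event_id`
# or `origin_server_ts`. Sketch only, not Synapse's serialisation code.
def strip_event(event: dict) -> dict:
    return {key: event[key] for key in ("content", "sender", "state_key", "type")}

full_event = {
    "type": "m.room.join_rules",
    "state_key": "",
    "sender": "@user2:test",
    "content": {"join_rule": "public"},
    "event_id": "$abc123",           # dropped by stripping
    "origin_server_ts": 1700000000,  # dropped by stripping
}
assert strip_event(full_event) == {
    "content": {"join_rule": "public"},
    "sender": "@user2:test",
    "state_key": "",
    "type": "m.room.join_rules",
}
# --- Editor's aside (end) ---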
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user1 = UserID.from_string(user1_id) - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user2 = UserID.from_string(user2_id) - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "preset": "public_chat", - "initial_state": [ - { - "content": { - "history_visibility": HistoryVisibility.WORLD_READABLE - }, - "state_key": "", - "type": EventTypes.RoomHistoryVisibility, - } - ], - }, - ) - # Ensure we're testing with a room with `world_readable` history visibility - # which means events are visible to anyone even without membership. - history_visibility_response = self.helper.get_state( - room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok - ) - self.assertEqual( - history_visibility_response.get("history_visibility"), - HistoryVisibility.WORLD_READABLE, - ) - - self.helper.send(room_id1, "activity before1", tok=user2_tok) - self.helper.send(room_id1, "activity before2", tok=user2_tok) - self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - self.helper.send(room_id1, "activity after3", tok=user2_tok) - self.helper.send(room_id1, "activity after4", tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - # Large enough to see the latest events and before the invite - "timeline_limit": 4, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # `timeline` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("timeline"), - response_body["rooms"][room_id1], - ) - # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("num_live"), - response_body["rooms"][room_id1], - ) - # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("limited"), - response_body["rooms"][room_id1], - ) - # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway) - self.assertIsNone( - response_body["rooms"][room_id1].get("prev_batch"), - response_body["rooms"][room_id1], - ) - # `required_state` is omitted for `invite` rooms with `stripped_state` - self.assertIsNone( - response_body["rooms"][room_id1].get("required_state"), - response_body["rooms"][room_id1], - ) - # We should have some `stripped_state` so the potential joiner can identify the - # room (we don't care about the order). 
- self.assertCountEqual( - response_body["rooms"][room_id1]["invite_state"], - [ - { - "content": {"creator": user2_id, "room_version": "10"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.create", - }, - { - "content": {"join_rule": "public"}, - "sender": user2_id, - "state_key": "", - "type": "m.room.join_rules", - }, - { - "content": {"displayname": user2.localpart, "membership": "join"}, - "sender": user2_id, - "state_key": user2_id, - "type": "m.room.member", - }, - { - "content": {"displayname": user1.localpart, "membership": "invite"}, - "sender": user2_id, - "state_key": user1_id, - "type": "m.room.member", - }, - ], - response_body["rooms"][room_id1]["invite_state"], - ) - - def test_rooms_invite_world_readable_history_incremental_sync(self) -> None: - """ - Test that `rooms` we are invited to have some stripped `invite_state` during an - incremental sync. - - This is an `invite` room so we should only have `stripped_state` (no `timeline`) - but depending on the semantics we decide, we could potentially see some - historical events before/after the `from_token` because the history is - `world_readable`. Same situation for events after the `from_token` if the - history visibility was set to `invited`. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user1 = UserID.from_string(user1_id) - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user2 = UserID.from_string(user2_id) - - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "preset": "public_chat", - "initial_state": [ - { - "content": { - "history_visibility": HistoryVisibility.WORLD_READABLE - }, - "state_key": "", - "type": EventTypes.RoomHistoryVisibility, - } - ], - }, - ) - # Ensure we're testing with a room with `world_readable` history visibility - # which means events are visible to anyone even without membership. 
-        history_visibility_response = self.helper.get_state(
-            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
-        )
-        self.assertEqual(
-            history_visibility_response.get("history_visibility"),
-            HistoryVisibility.WORLD_READABLE,
-        )
-
-        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
-
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [],
-                    # Large enough to see the latest events and before the invite
-                    "timeline_limit": 4,
-                }
-            }
-        }
-        _, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
-        self.helper.send(room_id1, "activity after token6", tok=user2_tok)
-
-        # Make the incremental Sliding Sync request
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # `timeline` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            response_body["rooms"][room_id1].get("timeline"),
-            response_body["rooms"][room_id1],
-        )
-        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            response_body["rooms"][room_id1].get("num_live"),
-            response_body["rooms"][room_id1],
-        )
-        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            response_body["rooms"][room_id1].get("limited"),
-            response_body["rooms"][room_id1],
-        )
-        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            response_body["rooms"][room_id1].get("prev_batch"),
-            response_body["rooms"][room_id1],
-        )
-        # `required_state` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            response_body["rooms"][room_id1].get("required_state"),
-            response_body["rooms"][room_id1],
-        )
-        # We should have some `stripped_state` so the potential joiner can identify the
-        # room (we don't care about the order).
-        self.assertCountEqual(
-            response_body["rooms"][room_id1]["invite_state"],
-            [
-                {
-                    "content": {"creator": user2_id, "room_version": "10"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.create",
-                },
-                {
-                    "content": {"join_rule": "public"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.join_rules",
-                },
-                {
-                    "content": {"displayname": user2.localpart, "membership": "join"},
-                    "sender": user2_id,
-                    "state_key": user2_id,
-                    "type": "m.room.member",
-                },
-                {
-                    "content": {"displayname": user1.localpart, "membership": "invite"},
-                    "sender": user2_id,
-                    "state_key": user1_id,
-                    "type": "m.room.member",
-                },
-            ],
-            response_body["rooms"][room_id1]["invite_state"],
-        )
-
-    def test_rooms_ban_initial_sync(self) -> None:
-        """
-        Test that `rooms` we are banned from in an initial sync only allows us to see
-        timeline events up to the ban event.
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity before1", tok=user2_tok) - self.helper.send(room_id1, "activity before2", tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) - event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok) - user1_ban_response = self.helper.ban( - room_id1, src=user2_id, targ=user1_id, tok=user2_tok - ) - - self.helper.send(room_id1, "activity after5", tok=user2_tok) - self.helper.send(room_id1, "activity after6", tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # We should see events before the ban but not after - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - event_response3["event_id"], - event_response4["event_id"], - user1_ban_response["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - # No "live" events in an initial sync (no `from_token` to define the "live" - # range) - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 0, - response_body["rooms"][room_id1], - ) - # There are more events to paginate to - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - True, - response_body["rooms"][room_id1], - ) - - def test_rooms_ban_incremental_sync1(self) -> None: - """ - Test that `rooms` we are banned from during the next incremental sync only - allows us to see timeline events up to the ban event. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity before1", tok=user2_tok) - self.helper.send(room_id1, "activity before2", tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) - event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok) - # The ban is within the token range (between the `from_token` and the sliding - # sync request) - user1_ban_response = self.helper.ban( - room_id1, src=user2_id, targ=user1_id, tok=user2_tok - ) - - self.helper.send(room_id1, "activity after5", tok=user2_tok) - self.helper.send(room_id1, "activity after6", tok=user2_tok) - - # Make the incremental Sliding Sync request - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # We should see events before the ban but not after - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - event_response3["event_id"], - event_response4["event_id"], - user1_ban_response["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - # All live events in the incremental sync - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 3, - response_body["rooms"][room_id1], - ) - # There aren't anymore events to paginate to in this range - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - False, - response_body["rooms"][room_id1], - ) - - def test_rooms_ban_incremental_sync2(self) -> None: - """ - Test that `rooms` we are banned from before the incremental sync don't return - any events in the timeline. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity before1", tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - self.helper.send(room_id1, "activity after2", tok=user2_tok) - # The ban is before we get our `from_token` - self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - - self.helper.send(room_id1, "activity after3", tok=user2_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - self.helper.send(room_id1, "activity after4", tok=user2_tok) - - # Make the incremental Sliding Sync request - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # Nothing to see for this banned user in the room in the token range - self.assertIsNone(response_body["rooms"].get(room_id1)) - - def test_rooms_no_required_state(self) -> None: - """ - Empty `rooms.required_state` should not return any state events in the room - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - # Empty `required_state` - "required_state": [], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # No `required_state` in response - self.assertIsNone( - response_body["rooms"][room_id1].get("required_state"), - response_body["rooms"][room_id1], - ) - - def test_rooms_required_state_initial_sync(self) -> None: - """ - Test `rooms.required_state` returns requested state events in the room during an - initial sync. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Tombstone, ""], - ], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Create, "")], - state_map[(EventTypes.RoomHistoryVisibility, "")], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_incremental_sync(self) -> None: - """ - Test `rooms.required_state` returns requested state events in the room during an - incremental sync. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Tombstone, ""], - ], - "timeline_limit": 1, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Send a message so the room comes down sync. - self.helper.send(room_id1, "msg", tok=user1_tok) - - # Make the incremental Sliding Sync request - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # We only return updates but only if we've sent the room down the - # connection before. - self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_incremental_sync_restart(self) -> None: - """ - Test `rooms.required_state` returns requested state events in the room during an - incremental sync, after a restart (and so the in memory caches are reset). - """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Tombstone, ""], - ], - "timeline_limit": 1, - } - } - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Reset the in-memory cache - self.hs.get_sliding_sync_handler().connection_store._connections.clear() - - # Make the Sliding Sync request - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # If the cache has been cleared then we do expect the state to come down - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Create, "")], - state_map[(EventTypes.RoomHistoryVisibility, "")], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_wildcard(self) -> None: - """ - Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key="", - body={"foo": "bar"}, - tok=user2_tok, - ) - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key="namespaced", - body={"foo": "bar"}, - tok=user2_tok, - ) - - # Make the Sliding Sync request with wildcards for the `event_type` and `state_key` - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [StateValues.WILDCARD, StateValues.WILDCARD], - ], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - # We should see all the state events in the room - state_map.values(), - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_wildcard_event_type(self) -> None: - """ - Test `rooms.required_state` returns relevant state events when using wildcard in - the event_type `["*", "foobarbaz"]`. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key="", - body={"foo": "bar"}, - tok=user2_tok, - ) - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key=user2_id, - body={"foo": "bar"}, - tok=user2_tok, - ) - - # Make the Sliding Sync request with wildcards for the `event_type` - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [StateValues.WILDCARD, user2_id], - ], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - # We expect at-least any state event with the `user2_id` as the `state_key` - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Member, user2_id)], - state_map[("org.matrix.foo_state", user2_id)], - }, - # Ideally, this would be exact but we're currently returning all state - # events when the `event_type` is a wildcard. - exact=False, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_wildcard_state_key(self) -> None: - """ - Test `rooms.required_state` returns relevant state events when using wildcard in - the state_key `["foobarbaz","*"]`. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request with wildcards for the `state_key` - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Member, StateValues.WILDCARD], - ], - "timeline_limit": 0, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Member, user1_id)], - state_map[(EventTypes.Member, user2_id)], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_lazy_loading_room_members(self) -> None: - """ - Test `rooms.required_state` returns people relevant to the timeline when - lazy-loading room members, `["m.room.member","$LAZY"]`. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user3_id, tok=user3_tok) - - self.helper.send(room_id1, "1", tok=user2_tok) - self.helper.send(room_id1, "2", tok=user3_tok) - self.helper.send(room_id1, "3", tok=user2_tok) - - # Make the Sliding Sync request with lazy loading for the room members - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, StateValues.LAZY], - ], - "timeline_limit": 3, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - # Only user2 and user3 sent events in the 3 events we see in the `timeline` - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Create, "")], - state_map[(EventTypes.Member, user2_id)], - state_map[(EventTypes.Member, user3_id)], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - def test_rooms_required_state_me(self) -> None: - """ - Test `rooms.required_state` correctly handles $ME. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - self.helper.send(room_id1, "1", tok=user2_tok) - - # Also send normal state events with state keys of the users, first - # change the power levels to allow this. 
-        self.helper.send_state(
-            room_id1,
-            event_type=EventTypes.PowerLevels,
-            body={"users": {user1_id: 50, user2_id: 100}},
-            tok=user2_tok,
-        )
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo",
-            state_key=user1_id,
-            body={},
-            tok=user1_tok,
-        )
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo",
-            state_key=user2_id,
-            body={},
-            tok=user2_tok,
-        )
-
-        # Make the Sliding Sync request with a request for '$ME'.
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [
-                        [EventTypes.Create, ""],
-                        [EventTypes.Member, StateValues.ME],
-                        ["org.matrix.foo", StateValues.ME],
-                    ],
-                    "timeline_limit": 3,
-                }
-            }
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # We should only see state events with a `state_key` matching user1 ($ME)
-        self._assertRequiredStateIncludes(
-            response_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[("org.matrix.foo", user1_id)],
-            },
-            exact=True,
-        )
-        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
-
-    @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
-    def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
-        """
-        Test `rooms.required_state` should not return state past a leave/ban event.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user3_id = self.register_user("user3", "pass")
-        user3_tok = self.login(user3_id, "pass")
-
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [
-                        [EventTypes.Create, ""],
-                        [EventTypes.Member, "*"],
-                        ["org.matrix.foo_state", ""],
-                    ],
-                    "timeline_limit": 3,
-                }
-            }
-        }
-        _, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user3_id, tok=user3_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-
-        if stop_membership == Membership.LEAVE:
-            # User 1 leaves
-            self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        elif stop_membership == Membership.BAN:
-            # User 1 is banned
-            self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # Change the state after user 1 leaves
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "qux"},
-            tok=user2_tok,
-        )
-        self.helper.leave(room_id1, user3_id, tok=user3_tok)
-
-        # Make the Sliding Sync request with lazy loading for the room members
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # We should only see state up to (and including) the leave/ban event
-        self._assertRequiredStateIncludes(
-            response_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[(EventTypes.Member, user3_id)],
-                state_map[("org.matrix.foo_state", "")],
-            },
-            exact=True,
-        )
-        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
-
-    def test_rooms_required_state_combine_superset(self) -> None:
-        """
-        Test `rooms.required_state` is combined across lists and room subscriptions.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.bar_state",
-            state_key="",
-            body={"bar": "qux"},
-            tok=user2_tok,
-        )
-
-        # Make the Sliding Sync request with two lists and a room subscription
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [
-                        [EventTypes.Create, ""],
-                        [EventTypes.Member, user1_id],
-                    ],
-                    "timeline_limit": 0,
-                },
-                "bar-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [
-                        [EventTypes.Member, StateValues.WILDCARD],
-                        ["org.matrix.foo_state", ""],
-                    ],
-                    "timeline_limit": 0,
-                },
-            },
-            "room_subscriptions": {
-                room_id1: {
-                    "required_state": [["org.matrix.bar_state", ""]],
-                    "timeline_limit": 0,
-                }
-            },
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            response_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[("org.matrix.foo_state", "")],
-                state_map[("org.matrix.bar_state", "")],
-            },
-            exact=True,
-        )
-        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
-
-    def test_rooms_required_state_partial_state(self) -> None:
-        """
-        Test partially-stated rooms are excluded unless `rooms.required_state` is
-        lazy-loading room members.
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) - _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) - join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) - - # Mark room2 as partial state - self.get_success( - mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2) - ) - - # Make the Sliding Sync request (NOT lazy-loading room members) - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 0, - }, - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure the list includes room1 but room2 is excluded because it's still - # partially-stated - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id1], - } - ], - response_body["lists"]["foo-list"], - ) - - # Make the Sliding Sync request (with lazy-loading room members) - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - # Lazy-load room members - [EventTypes.Member, StateValues.LAZY], - ], - "timeline_limit": 0, - }, - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # The list should include both rooms now because we're lazy-loading room members - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id2, room_id1], - } - ], - response_body["lists"]["foo-list"], - ) - - def test_room_subscriptions_with_join_membership(self) -> None: - """ - Test `room_subscriptions` with a joined room should give us timeline and current - state events. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request with just the room subscription - sync_body = { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 1, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - # We should see some state - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Create, "")], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - # We should see some events - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - join_response["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - # No "live" events in an initial sync (no `from_token` to define the "live" - # range) - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 0, - response_body["rooms"][room_id1], - ) - # There are more events to paginate to - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - True, - response_body["rooms"][room_id1], - ) - - def test_room_subscriptions_with_leave_membership(self) -> None: - """ - Test `room_subscriptions` with a leave room should give us timeline and state - events up to the leave event. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key="", - body={"foo": "bar"}, - tok=user2_tok, - ) - - join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - # Send some events after user1 leaves - self.helper.send(room_id1, "activity after leave", tok=user2_tok) - # Update state after user1 leaves - self.helper.send_state( - room_id1, - event_type="org.matrix.foo_state", - state_key="", - body={"foo": "qux"}, - tok=user2_tok, - ) - - # Make the Sliding Sync request with just the room subscription - sync_body = { - "room_subscriptions": { - room_id1: { - "required_state": [ - ["org.matrix.foo_state", ""], - ], - "timeline_limit": 2, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # We should see the state at the time of the leave - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[("org.matrix.foo_state", "")], - }, - exact=True, - ) - self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - - # We should see some before we left (nothing after) - self.assertEqual( - [ - event["event_id"] - for event in response_body["rooms"][room_id1]["timeline"] - ], - [ - join_response["event_id"], - leave_response["event_id"], - ], - response_body["rooms"][room_id1]["timeline"], - ) - # No "live" events in an initial sync (no 
`from_token` to define the "live" - # range) - self.assertEqual( - response_body["rooms"][room_id1]["num_live"], - 0, - response_body["rooms"][room_id1], - ) - # There are more events to paginate to - self.assertEqual( - response_body["rooms"][room_id1]["limited"], - True, - response_body["rooms"][room_id1], - ) - - def test_room_subscriptions_no_leak_private_room(self) -> None: - """ - Test `room_subscriptions` with a private room we have never been in should not - leak any data to the user. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False) - - # We should not be able to join the private room - self.helper.join( - room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN - ) - - # Make the Sliding Sync request with just the room subscription - sync_body = { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 1, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # We should not see the room at all (we're not in it) - self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) - - def test_room_subscriptions_world_readable(self) -> None: - """ - Test `room_subscriptions` with a room that has `world_readable` history visibility - - FIXME: We should be able to see the room timeline and state - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a room with `world_readable` history visibility - room_id1 = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "preset": "public_chat", - "initial_state": [ - { - "content": { - "history_visibility": HistoryVisibility.WORLD_READABLE - }, - "state_key": "", - "type": EventTypes.RoomHistoryVisibility, - } - ], - }, - ) - # Ensure we're testing with a room with `world_readable` history visibility - # which means events are visible to anyone even without membership. - history_visibility_response = self.helper.get_state( - room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok - ) - self.assertEqual( - history_visibility_response.get("history_visibility"), - HistoryVisibility.WORLD_READABLE, - ) - - # Note: We never join the room - - # Make the Sliding Sync request with just the room subscription - sync_body = { - "room_subscriptions": { - room_id1: { - "required_state": [ - [EventTypes.Create, ""], - ], - "timeline_limit": 1, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # FIXME: In the future, we should be able to see the room because it's - # `world_readable` but currently we don't support this. - self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"]) - - # Any extensions that use `lists`/`rooms` should be tested here - @parameterized.expand([("account_data",), ("receipts",)]) - def test_extensions_lists_rooms_relevant_rooms(self, extension_name: str) -> None: - """ - With various extensions, test out requesting different variations of - `lists`/`rooms`. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create some rooms - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok) - room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok) - room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok) - - room_id_to_human_name_map = { - room_id1: "room1", - room_id2: "room2", - room_id3: "room3", - room_id4: "room4", - room_id5: "room5", - } - - for room_id in room_id_to_human_name_map.keys(): - if extension_name == "account_data": - # Add some account data to each room - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - elif extension_name == "receipts": - event_response = self.helper.send( - room_id, body="new event", tok=user1_tok - ) - # Read last event - channel = self.make_request( - "POST", - f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - else: - raise AssertionError(f"Unknown extension name: {extension_name}") - - main_sync_body = { - "lists": { - # We expect this list range to include room5 and room4 - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - }, - # We expect this list range to include room5, room4, room3 - "bar-list": { - "ranges": [[0, 2]], - "required_state": [], - "timeline_limit": 0, - }, - }, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - } - - # Mix lists and rooms - sync_body = { - **main_sync_body, - "extensions": { - extension_name: { - "enabled": True, - "lists": ["foo-list", "non-existent-list"], - "rooms": [room_id1, room_id2, "!non-existent-room"], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ✅ Requested via `rooms` and a room subscription exists - # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions) - # room3: ❌ Not requested - # room4: ✅ Shows up because requested via `lists` and list exists in the response - # room5: ✅ Shows up because requested via `lists` and list exists in the response - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"][extension_name] - .get("rooms") - .keys() - }, - {"room1", "room4", "room5"}, - exact=True, - ) - - # Try wildcards (this is the default) - sync_body = { - **main_sync_body, - "extensions": { - extension_name: { - "enabled": True, - # "lists": ["*"], - # "rooms": ["*"], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions - # room2: ❌ Not requested - # room3: ✅ Shows up because of default `lists` wildcard and is in a list - # room4: ✅ Shows up because of default `lists` wildcard and is in a list - # room5: ✅ Shows up because of default `lists` wildcard and is in a list - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"][extension_name] - .get("rooms") - .keys() - }, - {"room1", "room3", "room4", "room5"}, - exact=True, - ) - - # Empty list will return nothing - sync_body = { - **main_sync_body, - "extensions": { - 
extension_name: { - "enabled": True, - "lists": [], - "rooms": [], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ❌ Not requested - # room2: ❌ Not requested - # room3: ❌ Not requested - # room4: ❌ Not requested - # room5: ❌ Not requested - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"][extension_name] - .get("rooms") - .keys() - }, - set(), - exact=True, - ) - - # Try wildcard and none - sync_body = { - **main_sync_body, - "extensions": { - extension_name: { - "enabled": True, - "lists": ["*"], - "rooms": [], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ❌ Not requested - # room2: ❌ Not requested - # room3: ✅ Shows up because of default `lists` wildcard and is in a list - # room4: ✅ Shows up because of default `lists` wildcard and is in a list - # room5: ✅ Shows up because of default `lists` wildcard and is in a list - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"][extension_name] - .get("rooms") - .keys() - }, - {"room3", "room4", "room5"}, - exact=True, - ) - - # Try requesting a room that is only in a list - sync_body = { - **main_sync_body, - "extensions": { - extension_name: { - "enabled": True, - "lists": [], - "rooms": [room_id5], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # room1: ❌ Not requested - # room2: ❌ Not requested - # room3: ❌ Not requested - # room4: ❌ Not requested - # room5: ✅ Requested via `rooms` and is in a list - self.assertIncludes( - { - room_id_to_human_name_map[room_id] - for room_id in response_body["extensions"][extension_name] - .get("rooms") - .keys() - }, - {"room5"}, - exact=True, - ) - - def test_rooms_required_state_incremental_sync_LIVE(self) -> None: - """Test that we only get state updates in incremental sync for rooms - we've already seen (LIVE). 
- """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.RoomHistoryVisibility, ""], - # This one doesn't exist in the room - [EventTypes.Name, ""], - ], - "timeline_limit": 0, - } - } - } - - response_body, from_token = self.do_sync(sync_body, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Create, "")], - state_map[(EventTypes.RoomHistoryVisibility, "")], - }, - exact=True, - ) - - # Send a state event - self.helper.send_state( - room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok - ) - - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id1) - ) - - self.assertNotIn("initial", response_body["rooms"][room_id1]) - self._assertRequiredStateIncludes( - response_body["rooms"][room_id1]["required_state"], - { - state_map[(EventTypes.Name, "")], - }, - exact=True, - ) - - @parameterized.expand([(False,), (True,)]) - def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None: - """ - Test getting room data where we have previously sent down the room, but - we missed sending down some timeline events previously and so its status - is considered PREVIOUSLY. - - There are two versions of this test, one where there are more messages - than the timeline limit, and one where there isn't. - """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - - self.helper.send(room_id1, "msg", tok=user1_tok) - - timeline_limit = 5 - conn_id = "conn_id" - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 0]], - "required_state": [], - "timeline_limit": timeline_limit, - } - }, - "conn_id": "conn_id", - } - - # The first room gets sent down the initial sync - response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok) - self.assertCountEqual( - response_body["rooms"].keys(), {room_id1}, response_body["rooms"] - ) - - # We now send down some events in room1 (depending on the test param). - expected_events = [] # The set of events in the timeline - if limited: - for _ in range(10): - resp = self.helper.send(room_id1, "msg1", tok=user1_tok) - expected_events.append(resp["event_id"]) - else: - resp = self.helper.send(room_id1, "msg1", tok=user1_tok) - expected_events.append(resp["event_id"]) - - # A second messages happens in the other room, so room1 won't get sent down. - self.helper.send(room_id2, "msg", tok=user1_tok) - - # Only the second room gets sent down sync. 
-        response_body, from_token = self.do_sync(
-            sync_body, since=initial_from_token, tok=user1_tok
-        )
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
-        )
-
-        # FIXME: This is a hack to record that the first room wasn't sent down
-        # sync, as we don't implement that currently.
-        sliding_sync_handler = self.hs.get_sliding_sync_handler()
-        requester = self.get_success(
-            self.hs.get_auth().get_user_by_access_token(user1_tok)
-        )
-        sync_config = SlidingSyncConfig(
-            user=requester.user,
-            requester=requester,
-            conn_id=conn_id,
-        )
-
-        parsed_initial_from_token = self.get_success(
-            SlidingSyncStreamToken.from_string(self.store, initial_from_token)
-        )
-        connection_position = self.get_success(
-            sliding_sync_handler.connection_store.record_rooms(
-                sync_config,
-                parsed_initial_from_token,
-                sent_room_ids=[],
-                unsent_room_ids=[room_id1],
-            )
-        )
-
-        # FIXME: Now fix up `from_token` with the new connection position above.
-        parsed_from_token = self.get_success(
-            SlidingSyncStreamToken.from_string(self.store, from_token)
-        )
-        parsed_from_token = SlidingSyncStreamToken(
-            stream_token=parsed_from_token.stream_token,
-            connection_position=connection_position,
-        )
-        from_token = self.get_success(parsed_from_token.to_string(self.store))
-
-        # We now send another event to room1, so we should sync all the missing events.
-        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
-        expected_events.append(resp["event_id"])
-
-        # This sync should contain the messages from room1 not yet sent down.
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
-        )
-        self.assertNotIn("initial", response_body["rooms"][room_id1])
-
-        self.assertEqual(
-            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
-            expected_events[-timeline_limit:],
-        )
-        self.assertEqual(response_body["rooms"][room_id1]["limited"], limited)
-        self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None)
-
-    def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None:
-        """
-        Test getting room data where we have previously sent down the room, but
-        we missed sending down some state previously and so its status is
-        considered PREVIOUSLY.
-        """
-
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        self.helper.send(room_id1, "msg", tok=user1_tok)
-
-        conn_id = "conn_id"
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 0]],
-                    "required_state": [
-                        [EventTypes.Create, ""],
-                        [EventTypes.RoomHistoryVisibility, ""],
-                        # This one doesn't exist in the room
-                        [EventTypes.Name, ""],
-                    ],
-                    "timeline_limit": 0,
-                }
-            },
-            "conn_id": "conn_id",
-        }
-
-        # The first room gets sent down the initial sync
-        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
-        )
-
-        # We now send down some state in room1
-        resp = self.helper.send_state(
-            room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok
-        )
-        name_change_id = resp["event_id"]
-
-        # A second message happens in the other room, so room1 won't get sent down.
-        self.helper.send(room_id2, "msg", tok=user1_tok)
-
-        # Only the second room gets sent down sync.
-        response_body, from_token = self.do_sync(
-            sync_body, since=initial_from_token, tok=user1_tok
-        )
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
-        )
-
-        # FIXME: This is a hack to record that the first room wasn't sent down
-        # sync, as we don't implement that currently.
-        sliding_sync_handler = self.hs.get_sliding_sync_handler()
-        requester = self.get_success(
-            self.hs.get_auth().get_user_by_access_token(user1_tok)
-        )
-        sync_config = SlidingSyncConfig(
-            user=requester.user,
-            requester=requester,
-            conn_id=conn_id,
-        )
-
-        parsed_initial_from_token = self.get_success(
-            SlidingSyncStreamToken.from_string(self.store, initial_from_token)
-        )
-        connection_position = self.get_success(
-            sliding_sync_handler.connection_store.record_rooms(
-                sync_config,
-                parsed_initial_from_token,
-                sent_room_ids=[],
-                unsent_room_ids=[room_id1],
-            )
-        )
-
-        # FIXME: Now fix up `from_token` with the new connection position above.
-        parsed_from_token = self.get_success(
-            SlidingSyncStreamToken.from_string(self.store, from_token)
-        )
-        parsed_from_token = SlidingSyncStreamToken(
-            stream_token=parsed_from_token.stream_token,
-            connection_position=connection_position,
-        )
-        from_token = self.get_success(parsed_from_token.to_string(self.store))
-
-        # We now send another event to room1, so we should sync all the missing state.
-        self.helper.send(room_id1, "msg", tok=user1_tok)
-
-        # This sync should contain the state changes from room1.
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
-        )
-        self.assertNotIn("initial", response_body["rooms"][room_id1])
-
-        # We should only see the name change.
-        self.assertEqual(
-            [
-                ev["event_id"]
-                for ev in response_body["rooms"][room_id1]["required_state"]
-            ],
-            [name_change_id],
-        )
-
-    def test_rooms_required_state_incremental_sync_NEVER(self) -> None:
-        """
-        Test getting `required_state` where we have NEVER sent down the room before
-        """
-
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        self.helper.send(room_id1, "msg", tok=user1_tok)
-
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 0]],
-                    "required_state": [
-                        [EventTypes.Create, ""],
-                        [EventTypes.RoomHistoryVisibility, ""],
-                        # This one doesn't exist in the room
-                        [EventTypes.Name, ""],
-                    ],
-                    "timeline_limit": 1,
-                }
-            },
-        }
-
-        # A message happens in the other room, so room1 won't get sent down.
-        self.helper.send(room_id2, "msg", tok=user1_tok)
-
-        # Only the second room gets sent down sync.
-        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
-        )
-
-        # We now send another event to room1, so we should send down the full
-        # room.
-        self.helper.send(room_id1, "msg2", tok=user1_tok)
-
-        # This sync should contain the messages from room1 not yet sent down.
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
-        )
-
-        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            response_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.RoomHistoryVisibility, "")],
-            },
-            exact=True,
-        )
-
-    def test_rooms_timeline_incremental_sync_NEVER(self) -> None:
-        """
-        Test getting timeline room data where we have NEVER sent down the room
-        before
-        """
-
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 0]],
-                    "required_state": [],
-                    "timeline_limit": 5,
-                }
-            },
-        }
-
-        expected_events = []
-        for _ in range(4):
-            resp = self.helper.send(room_id1, "msg", tok=user1_tok)
-            expected_events.append(resp["event_id"])
-
-        # A message happens in the other room, so room1 won't get sent down.
-        self.helper.send(room_id2, "msg", tok=user1_tok)
-
-        # Only the second room gets sent down sync.
-        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
-        )
-
-        # We now send another event to room1 so it comes down sync
-        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
-        expected_events.append(resp["event_id"])
-
-        # This sync should contain the messages from room1 not yet sent down.
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        self.assertCountEqual(
-            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
-        )
-
-        self.assertEqual(
-            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
-            expected_events,
-        )
-        self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
-        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
-
-    def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
-        """
-        Test that rooms with no updates are not returned in subsequent incremental
-        syncs.
-        """
-
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        sync_body = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [],
-                    "timeline_limit": 0,
-                }
-            }
-        }
-
-        _, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        # Make the incremental Sliding Sync request
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # Nothing has happened in the room, so the room should not come down
-        # /sync.
-        self.assertIsNone(response_body["rooms"].get(room_id1))
-
-    def test_empty_initial_room_comes_down_sync(self) -> None:
-        """
-        Test that rooms come down /sync even with empty required state and
-        timeline limit in initial sync.
- """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 0, - } - } - } - - # Make the Sliding Sync request - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - self.assertEqual(response_body["rooms"][room_id1]["initial"], True) - - -class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): - """Tests for the to-device sliding sync extension""" - - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - sync.register_servlets, - sendtodevice.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - - def _assert_to_device_response( - self, response_body: JsonDict, expected_messages: List[JsonDict] - ) -> str: - """Assert the sliding sync response was successful and has the expected - to-device messages. - - Returns the next_batch token from the to-device section. - """ - extensions = response_body["extensions"] - to_device = extensions["to_device"] - self.assertIsInstance(to_device["next_batch"], str) - self.assertEqual(to_device["events"], expected_messages) - - return to_device["next_batch"] - - def test_no_data(self) -> None: - """Test that enabling to-device extension works, even if there is - no-data - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # We expect no to-device messages - self._assert_to_device_response(response_body, []) - - def test_data_initial_sync(self) -> None: - """Test that we get to-device messages when we don't specify a since - token""" - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass", "d1") - user2_id = self.register_user("u2", "pass") - user2_tok = self.login(user2_id, "pass", "d2") - - # Send the to-device message - test_msg = {"foo": "bar"} - chan = self.make_request( - "PUT", - "/_matrix/client/r0/sendToDevice/m.test/1234", - content={"messages": {user1_id: {"d1": test_msg}}}, - access_token=user2_tok, - ) - self.assertEqual(chan.code, 200, chan.result) - - sync_body = { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - self._assert_to_device_response( - response_body, - [{"content": test_msg, "sender": user2_id, "type": "m.test"}], - ) - - def test_data_incremental_sync(self) -> None: - """Test that we get to-device messages over incremental syncs""" - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass", "d1") - user2_id = self.register_user("u2", "pass") - user2_tok = self.login(user2_id, "pass", "d2") - - sync_body: JsonDict = { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - # No to-device messages yet. 
-        next_batch = self._assert_to_device_response(response_body, [])
-
-        test_msg = {"foo": "bar"}
-        chan = self.make_request(
-            "PUT",
-            "/_matrix/client/r0/sendToDevice/m.test/1234",
-            content={"messages": {user1_id: {"d1": test_msg}}},
-            access_token=user2_tok,
-        )
-        self.assertEqual(chan.code, 200, chan.result)
-
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "to_device": {
-                    "enabled": True,
-                    "since": next_batch,
-                }
-            },
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-        next_batch = self._assert_to_device_response(
-            response_body,
-            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
-        )
-
-        # The next sliding sync request should not include the to-device
-        # message.
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "to_device": {
-                    "enabled": True,
-                    "since": next_batch,
-                }
-            },
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-        self._assert_to_device_response(response_body, [])
-
-        # An initial sliding sync request should not include the to-device
-        # message, as it should have been deleted
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "to_device": {
-                    "enabled": True,
-                }
-            },
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-        self._assert_to_device_response(response_body, [])
-
-    def test_wait_for_new_data(self) -> None:
-        """
-        Test to make sure that the Sliding Sync request waits for new data to arrive.
-
-        (Only applies to incremental syncs with a `timeout` specified)
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass", "d1")
-        user2_id = self.register_user("u2", "pass")
-        user2_tok = self.login(user2_id, "pass", "d2")
-
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "to_device": {
-                    "enabled": True,
-                }
-            },
-        }
-        _, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
-            content=sync_body,
-            access_token=user1_tok,
-            await_result=False,
-        )
-        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
-        with self.assertRaises(TimedOutException):
-            channel.await_result(timeout_ms=5000)
-        # Bump the to-device messages to trigger new results
-        test_msg = {"foo": "bar"}
-        send_to_device_channel = self.make_request(
-            "PUT",
-            "/_matrix/client/r0/sendToDevice/m.test/1234",
-            content={"messages": {user1_id: {"d1": test_msg}}},
-            access_token=user2_tok,
-        )
-        self.assertEqual(
-            send_to_device_channel.code, 200, send_to_device_channel.result
-        )
-        # Should respond before the 10 second timeout
-        channel.await_result(timeout_ms=3000)
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        self._assert_to_device_response(
-            channel.json_body,
-            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
-        )
-
-    def test_wait_for_new_data_timeout(self) -> None:
-        """
-        Test to make sure that the Sliding Sync request waits for new data to arrive but
-        no data ever arrives, so we time out. We're also making sure that the default data
-        from the to-device extension doesn't trigger a false positive for new data.
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "to_device": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Wake-up `notifier.wait_for_events(...)` that will cause us test - # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events( - user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA - ) - # Block for a little bit more to ensure we don't see any new results. - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=4000) - # Wait for the sync to complete (wait for the rest of the 10 second timeout, - # 5000 + 4000 + 1200 > 10000) - channel.await_result(timeout_ms=1200) - self.assertEqual(channel.code, 200, channel.json_body) - - self._assert_to_device_response(channel.json_body, []) - - -class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): - """Tests for the e2ee sliding sync extension""" - - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - room.register_servlets, - sync.register_servlets, - devices.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.e2e_keys_handler = hs.get_e2e_keys_handler() - - def test_no_data_initial_sync(self) -> None: - """ - Test that enabling e2ee extension works during an intitial sync, even if there - is no-data - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Make an initial Sliding Sync request with the e2ee extension enabled - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Device list updates are only present for incremental syncs - self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists")) - - # Both of these should be present even when empty - self.assertEqual( - response_body["extensions"]["e2ee"]["device_one_time_keys_count"], - { - # This is always present because of - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - "signed_curve25519": 0 - }, - ) - self.assertEqual( - response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], - [], - ) - - def test_no_data_incremental_sync(self) -> None: - """ - Test that enabling e2ee extension works during an incremental sync, even if - there is no-data - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make an incremental Sliding Sync request with the e2ee extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # Device list shows up for incremental syncs - self.assertEqual( - response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), - [], - ) - self.assertEqual( - 
response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), - [], - ) - - # Both of these should be present even when empty - self.assertEqual( - response_body["extensions"]["e2ee"]["device_one_time_keys_count"], - { - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. - # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - "signed_curve25519": 0 - }, - ) - self.assertEqual( - response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], - [], - ) - - def test_wait_for_new_data(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive. - - (Only applies to incremental syncs with a `timeout` specified) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - test_device_id = "TESTDEVICE" - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass", device_id=test_device_id) - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - self.helper.join(room_id, user3_id, tok=user3_tok) - - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Bump the device lists to trigger new results - # Have user3 update their device list - device_update_channel = self.make_request( - "PUT", - f"/devices/{test_device_id}", - { - "display_name": "New Device Name", - }, - access_token=user3_tok, - ) - self.assertEqual( - device_update_channel.code, 200, device_update_channel.json_body - ) - # Should respond before the 10 second timeout - channel.await_result(timeout_ms=3000) - self.assertEqual(channel.code, 200, channel.json_body) - - # We should see the device list update - self.assertEqual( - channel.json_body["extensions"]["e2ee"] - .get("device_lists", {}) - .get("changed"), - [user3_id], - ) - self.assertEqual( - channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), - [], - ) - - def test_wait_for_new_data_timeout(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive but - no data ever arrives so we timeout. We're also making sure that the default data - from the E2EE extension doesn't trigger a false-positive for new data (see - `device_one_time_keys_count.signed_curve25519`). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Wake-up `notifier.wait_for_events(...)` that will cause us test - # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events( - user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA - ) - # Block for a little bit more to ensure we don't see any new results. - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=4000) - # Wait for the sync to complete (wait for the rest of the 10 second timeout, - # 5000 + 4000 + 1200 > 10000) - channel.await_result(timeout_ms=1200) - self.assertEqual(channel.code, 200, channel.json_body) - - # Device lists are present for incremental syncs but empty because no device changes - self.assertEqual( - channel.json_body["extensions"]["e2ee"] - .get("device_lists", {}) - .get("changed"), - [], - ) - self.assertEqual( - channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), - [], - ) - - # Both of these should be present even when empty - self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], - { - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. 
- # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - "signed_curve25519": 0 - }, - ) - self.assertEqual( - channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], - [], - ) - - def test_device_lists(self) -> None: - """ - Test that device list updates are included in the response - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - test_device_id = "TESTDEVICE" - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass", device_id=test_device_id) - - user4_id = self.register_user("user4", "pass") - user4_tok = self.login(user4_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - self.helper.join(room_id, user3_id, tok=user3_tok) - self.helper.join(room_id, user4_id, tok=user4_tok) - - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Have user3 update their device list - channel = self.make_request( - "PUT", - f"/devices/{test_device_id}", - { - "display_name": "New Device Name", - }, - access_token=user3_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # User4 leaves the room - self.helper.leave(room_id, user4_id, tok=user4_tok) - - # Make an incremental Sliding Sync request with the e2ee extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # Device list updates show up - self.assertEqual( - response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"), - [user3_id], - ) - self.assertEqual( - response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), - [user4_id], - ) - - def test_device_one_time_keys_count(self) -> None: - """ - Test that `device_one_time_keys_count` are included in the response - """ - test_device_id = "TESTDEVICE" - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass", device_id=test_device_id) - - # Upload one time keys for the user/device - keys: JsonDict = { - "alg1:k1": "key1", - "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, - "alg2:k3": {"key": "key3"}, - } - upload_keys_response = self.get_success( - self.e2e_keys_handler.upload_keys_for_user( - user1_id, test_device_id, {"one_time_keys": keys} - ) - ) - self.assertDictEqual( - upload_keys_response, - { - "one_time_key_counts": { - "alg1": 1, - "alg2": 2, - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. 
- # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - "signed_curve25519": 0, - } - }, - ) - - # Make a Sliding Sync request with the e2ee extension enabled - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Check for those one time key counts - self.assertEqual( - response_body["extensions"]["e2ee"].get("device_one_time_keys_count"), - { - "alg1": 1, - "alg2": 2, - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. - # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - "signed_curve25519": 0, - }, - ) - - def test_device_unused_fallback_key_types(self) -> None: - """ - Test that `device_unused_fallback_key_types` are included in the response - """ - test_device_id = "TESTDEVICE" - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass", device_id=test_device_id) - - # We shouldn't have any unused fallback keys yet - res = self.get_success( - self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) - ) - self.assertEqual(res, []) - - # Upload a fallback key for the user/device - self.get_success( - self.e2e_keys_handler.upload_keys_for_user( - user1_id, - test_device_id, - {"fallback_keys": {"alg1:k1": "fallback_key1"}}, - ) - ) - # We should now have an unused alg1 key - fallback_res = self.get_success( - self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) - ) - self.assertEqual(fallback_res, ["alg1"], fallback_res) - - # Make a Sliding Sync request with the e2ee extension enabled - sync_body = { - "lists": {}, - "extensions": { - "e2ee": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Check for the unused fallback key types - self.assertListEqual( - response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"), - ["alg1"], - ) - - -class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): - """Tests for the account_data sliding sync extension""" - - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - room.register_servlets, - sync.register_servlets, - sendtodevice.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.account_data_handler = hs.get_account_data_handler() - - def test_no_data_initial_sync(self) -> None: - """ - Test that enabling the account_data extension works during an intitial sync, - even if there is no-data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Make an initial Sliding Sync request with the account_data extension enabled - sync_body = { - "lists": {}, - "extensions": { - "account_data": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, - # Even though we don't have any global account data set, Synapse saves some - # default push rules for us. 
-            {AccountDataTypes.PUSH_RULES},
-            exact=True,
-        )
-        self.assertIncludes(
-            response_body["extensions"]["account_data"].get("rooms").keys(),
-            set(),
-            exact=True,
-        )
-
-    def test_no_data_incremental_sync(self) -> None:
-        """
-        Test that enabling the account_data extension works during an incremental
-        sync, even if there is no data.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "account_data": {
-                    "enabled": True,
-                }
-            },
-        }
-        _, from_token = self.do_sync(sync_body, tok=user1_tok)
-
-        # Make an incremental Sliding Sync request with the account_data extension enabled
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # There have been no account data changes since the `from_token`, so we
-        # shouldn't see any account data here.
-        self.assertIncludes(
-            {
-                global_event["type"]
-                for global_event in response_body["extensions"]["account_data"].get(
-                    "global"
-                )
-            },
-            set(),
-            exact=True,
-        )
-        self.assertIncludes(
-            response_body["extensions"]["account_data"].get("rooms").keys(),
-            set(),
-            exact=True,
-        )
-
-    def test_global_account_data_initial_sync(self) -> None:
-        """
-        On initial sync, we should return all global account data.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        # Update the global account data
-        self.get_success(
-            self.account_data_handler.add_account_data_for_user(
-                user_id=user1_id,
-                account_data_type="org.matrix.foobarbaz",
-                content={"foo": "bar"},
-            )
-        )
-
-        # Make an initial Sliding Sync request with the account_data extension enabled
-        sync_body = {
-            "lists": {},
-            "extensions": {
-                "account_data": {
-                    "enabled": True,
-                }
-            },
-        }
-        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
-
-        # It should show us all of the global account data
-        self.assertIncludes(
-            {
-                global_event["type"]
-                for global_event in response_body["extensions"]["account_data"].get(
-                    "global"
-                )
-            },
-            {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"},
-            exact=True,
-        )
-        self.assertIncludes(
-            response_body["extensions"]["account_data"].get("rooms").keys(),
-            set(),
-            exact=True,
-        )
-
-    def test_global_account_data_incremental_sync(self) -> None:
-        """
-        On incremental sync, we should only return account data that has changed
-        since the `from_token`.
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Add some global account data - self.get_success( - self.account_data_handler.add_account_data_for_user( - user_id=user1_id, - account_data_type="org.matrix.foobarbaz", - content={"foo": "bar"}, - ) - ) - - sync_body = { - "lists": {}, - "extensions": { - "account_data": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Add some other global account data - self.get_success( - self.account_data_handler.add_account_data_for_user( - user_id=user1_id, - account_data_type="org.matrix.doodardaz", - content={"doo": "dar"}, - ) - ) - - # Make an incremental Sliding Sync request with the account_data extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, - # We should only see the new global account data that happened after the `from_token` - {"org.matrix.doodardaz"}, - exact=True, - ) - self.assertIncludes( - response_body["extensions"]["account_data"].get("rooms").keys(), - set(), - exact=True, - ) - - def test_room_account_data_initial_sync(self) -> None: - """ - On initial sync, we return all account data for a given room but only for - rooms that we request and are being returned in the Sliding Sync response. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a room and add some room account data - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id1, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id2, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Make an initial Sliding Sync request with the account_data extension enabled - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - "rooms": [room_id1, room_id2], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) - # Even though we requested room2, we only expect room1 to show up because that's - # the only room in the Sliding Sync response (room2 is not one of our room - # subscriptions or in a sliding window list). - self.assertIncludes( - response_body["extensions"]["account_data"].get("rooms").keys(), - {room_id1}, - exact=True, - ) - self.assertIncludes( - { - event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) - }, - {"org.matrix.roorarraz"}, - exact=True, - ) - - def test_room_account_data_incremental_sync(self) -> None: - """ - On incremental sync, we return all account data for a given room but only for - rooms that we request and are being returned in the Sliding Sync response. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a room and add some room account data - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id1, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - # Create another room with some room account data - room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id2, - account_data_type="org.matrix.roorarraz", - content={"roo": "rar"}, - ) - ) - - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - } - }, - "extensions": { - "account_data": { - "enabled": True, - "rooms": [room_id1, room_id2], - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Add some other room account data - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id1, - account_data_type="org.matrix.roorarraz2", - content={"roo": "rar"}, - ) - ) - self.get_success( - self.account_data_handler.add_account_data_to_room( - user_id=user1_id, - room_id=room_id2, - account_data_type="org.matrix.roorarraz2", - content={"roo": "rar"}, - ) - ) - - # Make an incremental Sliding Sync request with the account_data extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) - # Even though we requested room2, we only expect room1 to show up because that's - # the only room in the Sliding Sync response (room2 is not one of our room - # subscriptions or in a sliding window list). - self.assertIncludes( - response_body["extensions"]["account_data"].get("rooms").keys(), - {room_id1}, - exact=True, - ) - # We should only see the new room account data that happened after the `from_token` - self.assertIncludes( - { - event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) - }, - {"org.matrix.roorarraz2"}, - exact=True, - ) - - def test_wait_for_new_data(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive. 
- - (Only applies to incremental syncs with a `timeout` specified) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - sync_body = { - "lists": {}, - "extensions": { - "account_data": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make an incremental Sliding Sync request with the account_data extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Bump the global account data to trigger new results - self.get_success( - self.account_data_handler.add_account_data_for_user( - user1_id, - "org.matrix.foobarbaz", - {"foo": "bar"}, - ) - ) - # Should respond before the 10 second timeout - channel.await_result(timeout_ms=3000) - self.assertEqual(channel.code, 200, channel.json_body) - - # We should see the global account data update - self.assertIncludes( - { - global_event["type"] - for global_event in channel.json_body["extensions"]["account_data"].get( - "global" - ) - }, - {"org.matrix.foobarbaz"}, - exact=True, - ) - self.assertIncludes( - channel.json_body["extensions"]["account_data"].get("rooms").keys(), - set(), - exact=True, - ) - - def test_wait_for_new_data_timeout(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive but - no data ever arrives so we timeout. We're also making sure that the default data - from the account_data extension doesn't trigger a false-positive for new data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "account_data": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Wake-up `notifier.wait_for_events(...)` that will cause us test - # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events( - user1_id, - # We choose `StreamKeyType.PRESENCE` because we're testing for account data - # and don't want to contaminate the account data results using - # `StreamKeyType.ACCOUNT_DATA`. - wake_stream_key=StreamKeyType.PRESENCE, - ) - # Block for a little bit more to ensure we don't see any new results. 
- with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=4000) - # Wait for the sync to complete (wait for the rest of the 10 second timeout, - # 5000 + 4000 + 1200 > 10000) - channel.await_result(timeout_ms=1200) - self.assertEqual(channel.code, 200, channel.json_body) - - self.assertIsNotNone( - channel.json_body["extensions"]["account_data"].get("global") - ) - self.assertIsNotNone( - channel.json_body["extensions"]["account_data"].get("rooms") - ) - - -class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): - """Tests for the receipts sliding sync extension""" - - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - room.register_servlets, - sync.register_servlets, - receipts.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - - def test_no_data_initial_sync(self) -> None: - """ - Test that enabling the receipts extension works during an intitial sync, - even if there is no-data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Make an initial Sliding Sync request with the receipts extension enabled - sync_body = { - "lists": {}, - "extensions": { - "receipts": { - "enabled": True, - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - self.assertIncludes( - response_body["extensions"]["receipts"].get("rooms").keys(), - set(), - exact=True, - ) - - def test_no_data_incremental_sync(self) -> None: - """ - Test that enabling receipts extension works during an incremental sync, even - if there is no-data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "receipts": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make an incremental Sliding Sync request with the receipts extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - self.assertIncludes( - response_body["extensions"]["receipts"].get("rooms").keys(), - set(), - exact=True, - ) - - def test_receipts_initial_sync_with_timeline(self) -> None: - """ - On initial sync, we only return receipts for events in a given room's timeline. - - We also make sure that we only return receipts for rooms that we request and are - already being returned in the Sliding Sync response. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass") - user4_id = self.register_user("user4", "pass") - user4_tok = self.login(user4_id, "pass") - - # Create a room - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user3_id, tok=user3_tok) - self.helper.join(room_id1, user4_id, tok=user4_tok) - room1_event_response1 = self.helper.send( - room_id1, body="new event1", tok=user2_tok - ) - room1_event_response2 = self.helper.send( - room_id1, body="new event2", tok=user2_tok - ) - # User1 reads the last event - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User2 reads the last event - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", - {}, - access_token=user2_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User3 reads the first event - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", - {}, - access_token=user3_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User4 privately reads the last event (make sure this doesn't leak to the other users) - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}", - {}, - access_token=user4_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Create another room - room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id2, user1_id, tok=user1_tok) - self.helper.join(room_id2, user3_id, tok=user3_tok) - self.helper.join(room_id2, user4_id, tok=user4_tok) - room2_event_response1 = self.helper.send( - room_id2, body="new event2", tok=user2_tok - ) - # User1 reads the last event - channel = self.make_request( - "POST", - f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User2 reads the last event - channel = self.make_request( - "POST", - f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", - {}, - access_token=user2_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User4 privately reads the last event (make sure this doesn't leak to the other users) - channel = self.make_request( - "POST", - f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}", - {}, - access_token=user4_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Make an initial Sliding Sync request with the receipts extension enabled - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id1: { - "required_state": [], - # On initial sync, we only have receipts for events in the timeline - "timeline_limit": 1, - } - }, - "extensions": { - "receipts": { - "enabled": True, - "rooms": [room_id1, room_id2], - } - }, - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Only the latest event in the room is in the timelie 
because the `timeline_limit` is 1 - self.assertIncludes( - { - event["event_id"] - for event in response_body["rooms"][room_id1].get("timeline", []) - }, - {room1_event_response2["event_id"]}, - exact=True, - message=str(response_body["rooms"][room_id1]), - ) - - # Even though we requested room2, we only expect room1 to show up because that's - # the only room in the Sliding Sync response (room2 is not one of our room - # subscriptions or in a sliding window list). - self.assertIncludes( - response_body["extensions"]["receipts"].get("rooms").keys(), - {room_id1}, - exact=True, - ) - # Sanity check that it's the correct ephemeral event type - self.assertEqual( - response_body["extensions"]["receipts"]["rooms"][room_id1]["type"], - EduTypes.RECEIPT, - ) - # We can see user1 and user2 read receipts - self.assertIncludes( - response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ - room1_event_response2["event_id"] - ][ReceiptTypes.READ].keys(), - {user1_id, user2_id}, - exact=True, - ) - # User1 did not have a private read receipt and we shouldn't leak others' - # private read receipts - self.assertIncludes( - response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ - room1_event_response2["event_id"] - ] - .get(ReceiptTypes.READ_PRIVATE, {}) - .keys(), - set(), - exact=True, - ) - - # We shouldn't see receipts for event2 since it wasn't in the timeline and this is an initial sync - self.assertIsNone( - response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get( - room1_event_response1["event_id"] - ) - ) - - def test_receipts_incremental_sync(self) -> None: - """ - On incremental sync, we return all receipts in the token range for a given room - but only for rooms that we request and are being returned in the Sliding Sync - response. 
- """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass") - - # Create room1 - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.join(room_id1, user3_id, tok=user3_tok) - room1_event_response1 = self.helper.send( - room_id1, body="new event2", tok=user2_tok - ) - # User2 reads the last event (before the `from_token`) - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", - {}, - access_token=user2_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Create room2 - room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id2, user1_id, tok=user1_tok) - room2_event_response1 = self.helper.send( - room_id2, body="new event2", tok=user2_tok - ) - # User1 reads the last event (before the `from_token`) - channel = self.make_request( - "POST", - f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Create room3 - room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id3, user1_id, tok=user1_tok) - self.helper.join(room_id3, user3_id, tok=user3_tok) - room3_event_response1 = self.helper.send( - room_id3, body="new event", tok=user2_tok - ) - - # Create room4 - room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id4, user1_id, tok=user1_tok) - self.helper.join(room_id4, user3_id, tok=user3_tok) - event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok) - # User1 reads the last event (before the `from_token`) - channel = self.make_request( - "POST", - f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id1: { - "required_state": [], - "timeline_limit": 0, - }, - room_id3: { - "required_state": [], - "timeline_limit": 0, - }, - room_id4: { - "required_state": [], - "timeline_limit": 0, - }, - }, - "extensions": { - "receipts": { - "enabled": True, - "rooms": [room_id1, room_id2, room_id3, room_id4], - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Add some more read receipts after the `from_token` - # - # User1 reads room1 - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User1 privately reads room2 - channel = self.make_request( - "POST", - f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User3 reads room3 - channel = self.make_request( - "POST", - f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}", - {}, - access_token=user3_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # No activity for room4 after the `from_token` - - # Make an incremental Sliding Sync request with the receipts 
extension enabled
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # Even though we requested room2, we only expect rooms to show up if they are
-        # already in the Sliding Sync response. room4 doesn't show up because there is
-        # no activity after the `from_token`.
-        self.assertIncludes(
-            response_body["extensions"]["receipts"].get("rooms").keys(),
-            {room_id1, room_id3},
-            exact=True,
-        )
-
-        # Check room1:
-        #
-        # Sanity check that it's the correct ephemeral event type
-        self.assertEqual(
-            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
-            EduTypes.RECEIPT,
-        )
-        # We only see that user1 has read something in room1 since the `from_token`
-        self.assertIncludes(
-            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
-                room1_event_response1["event_id"]
-            ][ReceiptTypes.READ].keys(),
-            {user1_id},
-            exact=True,
-        )
-        # User1 did not send a private read receipt in this room and we shouldn't leak
-        # others' private read receipts
-        self.assertIncludes(
-            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
-                room1_event_response1["event_id"]
-            ]
-            .get(ReceiptTypes.READ_PRIVATE, {})
-            .keys(),
-            set(),
-            exact=True,
-        )
-        # No events in the timeline since they were sent before the `from_token`
-        self.assertNotIn(room_id1, response_body["rooms"])
-
-        # Check room3:
-        #
-        # Sanity check that it's the correct ephemeral event type
-        self.assertEqual(
-            response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
-            EduTypes.RECEIPT,
-        )
-        # We only see that user3 has read something in room3 since the `from_token`
-        self.assertIncludes(
-            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
-                room3_event_response1["event_id"]
-            ][ReceiptTypes.READ].keys(),
-            {user3_id},
-            exact=True,
-        )
-        # User1 did not send a private read receipt in this room and we shouldn't leak
-        # others' private read receipts
-        self.assertIncludes(
-            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
-                room3_event_response1["event_id"]
-            ]
-            .get(ReceiptTypes.READ_PRIVATE, {})
-            .keys(),
-            set(),
-            exact=True,
-        )
-        # No events in the timeline since they were sent before the `from_token`
-        self.assertNotIn(room_id3, response_body["rooms"])
-
-    def test_receipts_incremental_sync_all_live_receipts(self) -> None:
-        """
-        On incremental sync, we return all receipts in the token range for a given room
-        even if they are not in the timeline.
- """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create room1 - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id1, user1_id, tok=user1_tok) - - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id1: { - "required_state": [], - # The timeline will only include event2 - "timeline_limit": 1, - }, - }, - "extensions": { - "receipts": { - "enabled": True, - "rooms": [room_id1], - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - room1_event_response1 = self.helper.send( - room_id1, body="new event1", tok=user2_tok - ) - room1_event_response2 = self.helper.send( - room_id1, body="new event2", tok=user2_tok - ) - - # User1 reads event1 - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}", - {}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - # User2 reads event2 - channel = self.make_request( - "POST", - f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}", - {}, - access_token=user2_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Make an incremental Sliding Sync request with the receipts extension enabled - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # We should see room1 because it has receipts in the token range - self.assertIncludes( - response_body["extensions"]["receipts"].get("rooms").keys(), - {room_id1}, - exact=True, - ) - # Sanity check that it's the correct ephemeral event type - self.assertEqual( - response_body["extensions"]["receipts"]["rooms"][room_id1]["type"], - EduTypes.RECEIPT, - ) - # We should see all receipts in the token range regardless of whether the events - # are in the timeline - self.assertIncludes( - response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ - room1_event_response1["event_id"] - ][ReceiptTypes.READ].keys(), - {user1_id}, - exact=True, - ) - self.assertIncludes( - response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][ - room1_event_response2["event_id"] - ][ReceiptTypes.READ].keys(), - {user2_id}, - exact=True, - ) - # Only the latest event in the timeline because the `timeline_limit` is 1 - self.assertIncludes( - { - event["event_id"] - for event in response_body["rooms"][room_id1].get("timeline", []) - }, - {room1_event_response2["event_id"]}, - exact=True, - message=str(response_body["rooms"][room_id1]), - ) - - def test_wait_for_new_data(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive. 
- - (Only applies to incremental syncs with a `timeout` specified) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - event_response = self.helper.send(room_id, body="new event", tok=user2_tok) - - sync_body = { - "lists": {}, - "room_subscriptions": { - room_id: { - "required_state": [], - "timeline_limit": 0, - }, - }, - "extensions": { - "receipts": { - "enabled": True, - "rooms": [room_id], - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make an incremental Sliding Sync request with the receipts extension enabled - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Bump the receipts to trigger new results - receipt_channel = self.make_request( - "POST", - f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}", - {}, - access_token=user2_tok, - ) - self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body) - # Should respond before the 10 second timeout - channel.await_result(timeout_ms=3000) - self.assertEqual(channel.code, 200, channel.json_body) - - # We should see the new receipt - self.assertIncludes( - channel.json_body.get("extensions", {}) - .get("receipts", {}) - .get("rooms", {}) - .keys(), - {room_id}, - exact=True, - message=str(channel.json_body), - ) - self.assertIncludes( - channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][ - event_response["event_id"] - ][ReceiptTypes.READ].keys(), - {user2_id}, - exact=True, - ) - # User1 did not send a private read receipt in this room and we shouldn't leak - # others' private read receipts - self.assertIncludes( - channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][ - event_response["event_id"] - ] - .get(ReceiptTypes.READ_PRIVATE, {}) - .keys(), - set(), - exact=True, - ) - - def test_wait_for_new_data_timeout(self) -> None: - """ - Test to make sure that the Sliding Sync request waits for new data to arrive but - no data ever arrives so we timeout. We're also making sure that the default data - from the receipts extension doesn't trigger a false-positive for new data. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - sync_body = { - "lists": {}, - "extensions": { - "receipts": { - "enabled": True, - } - }, - } - _, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make the Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?timeout=10000&pos={from_token}", - content=sync_body, - access_token=user1_tok, - await_result=False, - ) - # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` - with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=5000) - # Wake-up `notifier.wait_for_events(...)` that will cause us test - # `SlidingSyncResult.__bool__` for new results. - self._bump_notifier_wait_for_events( - user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA - ) - # Block for a little bit more to ensure we don't see any new results. 
- with self.assertRaises(TimedOutException): - channel.await_result(timeout_ms=4000) - # Wait for the sync to complete (wait for the rest of the 10 second timeout, - # 5000 + 4000 + 1200 > 10000) - channel.await_result(timeout_ms=1200) - self.assertEqual(channel.code, 200, channel.json_body) - - self.assertIncludes( - channel.json_body["extensions"]["receipts"].get("rooms").keys(), - set(), - exact=True, - ) From 2b620e0a150ea86bc8ad0814110bef51d6e3e930 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 Jul 2024 13:20:23 -0500 Subject: [PATCH 091/332] Sliding Sync: Add typing notification extension (MSC3961) (#17505) [MSC3961](https://github.com/matrix-org/matrix-spec-proposals/pull/3961): Sliding Sync Extension: Typing Notifications Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync --- changelog.d/17505.feature | 1 + synapse/handlers/receipts.py | 4 + synapse/handlers/sliding_sync.py | 81 +++ synapse/handlers/typing.py | 9 +- synapse/rest/client/sync.py | 6 +- synapse/types/handlers/__init__.py | 24 +- synapse/types/rest/client/__init__.py | 18 + .../sliding_sync/test_extension_typing.py | 482 ++++++++++++++++++ .../client/sliding_sync/test_extensions.py | 22 +- 9 files changed, 640 insertions(+), 7 deletions(-) create mode 100644 changelog.d/17505.feature create mode 100644 tests/rest/client/sliding_sync/test_extension_typing.py diff --git a/changelog.d/17505.feature b/changelog.d/17505.feature new file mode 100644 index 0000000000..ca0c2bd70f --- /dev/null +++ b/changelog.d/17505.feature @@ -0,0 +1 @@ +Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index d04c76be2a..c776654d12 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -288,6 +288,10 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): explicit_room_id: Optional[str] = None, to_key: Optional[MultiWriterStreamToken] = None, ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: + """ + Find read receipts for given rooms (> `from_token` and <= `to_token`) + """ + if to_key is None: to_key = self.get_current_key() diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 530e7b7b4e..8467766518 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -2284,11 +2284,24 @@ class SlidingSyncHandler: from_token=from_token, ) + typing_response = None + if sync_config.extensions.typing is not None: + typing_response = await self.get_typing_extension_response( + sync_config=sync_config, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + actual_room_response_map=actual_room_response_map, + typing_request=sync_config.extensions.typing, + to_token=to_token, + from_token=from_token, + ) + return SlidingSyncResult.Extensions( to_device=to_device_response, e2ee=e2ee_response, account_data=account_data_response, receipts=receipts_response, + typing=typing_response, ) def find_relevant_room_ids_for_extension( @@ -2615,6 +2628,8 @@ class SlidingSyncHandler: room_id_to_receipt_map: Dict[str, JsonMapping] = {} if len(relevant_room_ids) > 0: + # TODO: Take connection tracking into account so that when a room comes back + # into range we can send the receipts that were missed. 
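+            # Fetch the read receipts between the `from_token` and `to_token` for
+            # the rooms that are actually in this sync response.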
             receipt_source = self.event_sources.sources.receipt
             receipts, _ = await receipt_source.get_new_events(
                 user=sync_config.user,
@@ -2636,6 +2651,8 @@ class SlidingSyncHandler:
                 type = receipt["type"]
                 content = receipt["content"]
 
+                # For `initial: True` rooms, we only want to include receipts for events
+                # in the timeline.
                 room_result = actual_room_response_map.get(room_id)
                 if room_result is not None:
                     if room_result.initial:
@@ -2659,6 +2676,70 @@ class SlidingSyncHandler:
             room_id_to_receipt_map=room_id_to_receipt_map,
         )
 
+    async def get_typing_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        typing_request: SlidingSyncConfig.Extensions.TypingExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]:
+        """Handle Typing Notification extension (MSC3961)
+
+        Args:
+            sync_config: Sync configuration
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            typing_request: The typing extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not typing_request.enabled:
+            return None
+
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=typing_request.lists,
+            requested_room_ids=typing_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_typing_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            # Note: We don't need to take connection tracking into account for typing
+            # notifications because clients will get anything that is still relevant
+            # and hasn't timed out when the room comes back into range. We consider
+            # the gap where the room fell out of range long enough for any typing
+            # notifications to have timed out (it's not worth the ~30 seconds of data
+            # we may have missed).
+            typing_source = self.event_sources.sources.typing
+            typing_notifications, _ = await typing_source.get_new_events(
+                user=sync_config.user,
+                from_key=(from_token.stream_token.typing_key if from_token else 0),
+                to_key=to_token.typing_key,
+                # This is a dummy value and isn't used in the function
+                limit=0,
+                room_ids=relevant_room_ids,
+                is_guest=False,
+            )
+
+            for typing_notification in typing_notifications:
+                # These fields should exist for every typing notification
+                room_id = typing_notification["room_id"]
+                type = typing_notification["type"]
+                content = typing_notification["content"]
+
+                room_id_to_typing_map[room_id] = {"type": type, "content": content}
+
+        return SlidingSyncResult.Extensions.TypingExtension(
+            room_id_to_typing_map=room_id_to_typing_map,
+        )
+
 
 class HaveSentRoomFlag(Enum):
     """Flag for whether we have sent the room down a sliding sync connection.
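As a usage sketch (not part of the patch itself): a client opts in through `extensions.typing` in the request body and reads the per-room `m.typing` EDUs back out of `extensions.typing.rooms`. The `extract_typing_users` helper and the example room ID below are illustrative assumptions; the request and response shapes follow the handler above and the tests added later in this patch.

    # Hypothetical client-side helper -- a sketch only, not part of this patch.
    from typing import Dict, List

    # Request body: opt in to the typing extension. `lists`/`rooms` scope the
    # extension and both default to ["*"] (everything in the response).
    sync_body = {
        "lists": {},
        "room_subscriptions": {
            "!example-room:test": {"required_state": [], "timeline_limit": 0},
        },
        "extensions": {
            "typing": {
                "enabled": True,
                "rooms": ["!example-room:test"],
            }
        },
    }

    def extract_typing_users(response_body: dict) -> Dict[str, List[str]]:
        """Map each room in the response to the users currently typing in it.

        Rooms only appear under `extensions.typing.rooms` when they are already
        in the sliding sync response and had typing activity in the token range.
        """
        typing_rooms = (
            response_body.get("extensions", {}).get("typing", {}).get("rooms", {})
        )
        typing_users: Dict[str, List[str]] = {}
        for room_id, edu in typing_rooms.items():
            # Each entry is an `m.typing` ephemeral event:
            # {"type": "m.typing", "content": {"user_ids": [...]}}
            if edu.get("type") == "m.typing":
                typing_users[room_id] = list(
                    edu.get("content", {}).get("user_ids", [])
                )
        return typing_users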
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 4c87718337..8d693fee30 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -565,7 +565,12 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]): room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, + to_key: Optional[int] = None, ) -> Tuple[List[JsonMapping], int]: + """ + Find typing notifications for given rooms (> `from_token` and <= `to_token`) + """ + with Measure(self.clock, "typing.get_new_events"): from_key = int(from_key) handler = self.get_typing_handler() @@ -574,7 +579,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]): for room_id in room_ids: if room_id not in handler._room_serials: continue - if handler._room_serials[room_id] <= from_key: + if handler._room_serials[room_id] <= from_key or ( + to_key is not None and handler._room_serials[room_id] > to_key + ): continue events.append(self._make_event_for(room_id)) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index c607d08de5..4f2c552af2 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1152,10 +1152,14 @@ class SlidingSyncRestServlet(RestServlet): if extensions.receipts is not None: serialized_extensions["receipts"] = { - # Same as the the top-level `account_data.events` field in Sync v2. "rooms": extensions.receipts.room_id_to_receipt_map, } + if extensions.typing is not None: + serialized_extensions["typing"] = { + "rooms": extensions.typing.room_id_to_typing_map, + } + return serialized_extensions diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index a6adf6c9ea..363f060bef 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -366,7 +366,8 @@ class SlidingSyncResult: """The Receipts extension (MSC3960) Attributes: - room_id_to_receipt_map: Mapping from room_id to `m.receipt` event (type, content) + room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral + event (type, content) """ room_id_to_receipt_map: Mapping[str, JsonMapping] @@ -374,14 +375,33 @@ class SlidingSyncResult: def __bool__(self) -> bool: return bool(self.room_id_to_receipt_map) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TypingExtension: + """The Typing Notification extension (MSC3961) + + Attributes: + room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral + event (type, content) + """ + + room_id_to_typing_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_typing_map) + to_device: Optional[ToDeviceExtension] = None e2ee: Optional[E2eeExtension] = None account_data: Optional[AccountDataExtension] = None receipts: Optional[ReceiptsExtension] = None + typing: Optional[TypingExtension] = None def __bool__(self) -> bool: return bool( - self.to_device or self.e2ee or self.account_data or self.receipts + self.to_device + or self.e2ee + or self.account_data + or self.receipts + or self.typing ) next_pos: SlidingSyncStreamToken diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 4e632e4492..93b537ab7b 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -359,10 +359,28 @@ class SlidingSyncBody(RequestBodyModel): # Process all room subscriptions defined in the Room Subscription API. (This is the default.) 
        rooms: Optional[List[StrictStr]] = ["*"]
 
+    class TypingExtension(RequestBodyModel):
+        """The Typing Notification extension (MSC3961)
+
+        Attributes:
+            enabled
+            lists: List of list keys (from the Sliding Window API) to apply this
+                extension to.
+            rooms: List of room IDs (from the Room Subscription API) to apply this
+                extension to.
+        """
+
+        enabled: Optional[StrictBool] = False
+        # Process all lists defined in the Sliding Window API. (This is the default.)
+        lists: Optional[List[StrictStr]] = ["*"]
+        # Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+        rooms: Optional[List[StrictStr]] = ["*"]
+
     to_device: Optional[ToDeviceExtension] = None
     e2ee: Optional[E2eeExtension] = None
     account_data: Optional[AccountDataExtension] = None
     receipts: Optional[ReceiptsExtension] = None
+    typing: Optional[TypingExtension] = None
 
     conn_id: Optional[str]
 
diff --git a/tests/rest/client/sliding_sync/test_extension_typing.py b/tests/rest/client/sliding_sync/test_extension_typing.py
new file mode 100644
index 0000000000..7f523e0f10
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_typing.py
@@ -0,0 +1,482 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EduTypes
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
+    """Tests for the typing notification sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the typing extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the typing extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the typing extension works during an incremental sync,
+        even if there is no data.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "typing": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the typing extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIncludes( + response_body["extensions"]["typing"].get("rooms").keys(), + set(), + exact=True, + ) + + def test_typing_initial_sync(self) -> None: + """ + On initial sync, we return all typing notifications for rooms that we request + and are being returned in the Sliding Sync response. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + # Create a room + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + # User1 starts typing in room1 + channel = self.make_request( + "PUT", + f"/rooms/{room_id1}/typing/{user1_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 starts typing in room1 + channel = self.make_request( + "PUT", + f"/rooms/{room_id1}/typing/{user2_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create another room + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + self.helper.join(room_id2, user3_id, tok=user3_tok) + self.helper.join(room_id2, user4_id, tok=user4_tok) + # User1 starts typing in room2 + channel = self.make_request( + "PUT", + f"/rooms/{room_id2}/typing/{user1_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + # User2 starts typing in room2 + channel = self.make_request( + "PUT", + f"/rooms/{room_id2}/typing/{user2_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Make an initial Sliding Sync request with the typing extension enabled + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "typing": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Even though we requested room2, we only expect room1 to show up because that's + # the only room in the Sliding Sync response (room2 is not one of our room + # subscriptions or in a sliding window list). 
+ self.assertIncludes( + response_body["extensions"]["typing"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + # Sanity check that it's the correct ephemeral event type + self.assertEqual( + response_body["extensions"]["typing"]["rooms"][room_id1]["type"], + EduTypes.TYPING, + ) + # We can see user1 and user2 typing + self.assertIncludes( + set( + response_body["extensions"]["typing"]["rooms"][room_id1]["content"][ + "user_ids" + ] + ), + {user1_id, user2_id}, + exact=True, + ) + + def test_typing_incremental_sync(self) -> None: + """ + On incremental sync, we return all typing notifications in the token range for a + given room but only for rooms that we request and are being returned in the + Sliding Sync response. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + # Create room1 + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + # User2 starts typing in room1 + channel = self.make_request( + "PUT", + f"/rooms/{room_id1}/typing/{user2_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user2_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room2 + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + # User1 starts typing in room2 (before the `from_token`) + channel = self.make_request( + "PUT", + f"/rooms/{room_id2}/typing/{user1_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Create room3 + room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id3, user1_id, tok=user1_tok) + self.helper.join(room_id3, user3_id, tok=user3_tok) + + # Create room4 + room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id4, user1_id, tok=user1_tok) + self.helper.join(room_id4, user3_id, tok=user3_tok) + # User1 starts typing in room4 (before the `from_token`) + channel = self.make_request( + "PUT", + f"/rooms/{room_id4}/typing/{user1_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Advance time so all of the typing notifications timeout before we make our + # Sliding Sync requests. Even though these are sent before the `from_token`, the + # typing code only keeps track of stream position of the latest typing + # notification so "old" typing notifications that are still "alive" (haven't + # timed out) can appear in the response. 
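+        # (Each typing request above used `"timeout": 30000`, so advancing the
+        # reactor by 36 seconds is comfortably past their expiry.)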
+        self.reactor.advance(36)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id3: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id4: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2, room_id3, room_id4],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Add some more typing notifications after the `from_token`
+        #
+        # User1 starts typing in room1
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id1}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User1 starts typing in room2
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id2}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User3 starts typing in room3
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id3}/typing/{user3_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # No activity for room4 after the `from_token`
+
+        # Make an incremental Sliding Sync request with the typing extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Even though we requested room2, we only expect rooms to show up if they are
+        # already in the Sliding Sync response. room4 doesn't show up because there is
+        # no activity after the `from_token`.
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            {room_id1, room_id3},
+            exact=True,
+        )
+
+        # Check room1:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
+            EduTypes.TYPING,
+        )
+        # We only see that user1 is typing in room1 since the `from_token`
+        self.assertIncludes(
+            set(
+                response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user1_id},
+            exact=True,
+        )
+
+        # Check room3:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["typing"]["rooms"][room_id3]["type"],
+            EduTypes.TYPING,
+        )
+        # We only see that user3 is typing in room3 since the `from_token`
+        self.assertIncludes(
+            set(
+                response_body["extensions"]["typing"]["rooms"][room_id3]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user3_id},
+            exact=True,
+        )
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                    "rooms": [room_id],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the typing extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the typing status to trigger new results
+        typing_channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/typing/{user2_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user2_tok,
+        )
+        self.assertEqual(typing_channel.code, 200, typing_channel.json_body)
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the new typing notification
+        self.assertIncludes(
+            channel.json_body.get("extensions", {})
+            .get("typing", {})
+            .get("rooms", {})
+            .keys(),
+            {room_id},
+            exact=True,
+            message=str(channel.json_body),
+        )
+        self.assertIncludes(
+            set(
+                channel.json_body["extensions"]["typing"]["rooms"][room_id]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user2_id},
+            exact=True,
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive, but
+        no data ever arrives, so we time out. We're also making sure that the default data
+        from the typing extension doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertIncludes( + channel.json_body["extensions"]["typing"].get("rooms").keys(), + set(), + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py index e42904b69b..68f6661334 100644 --- a/tests/rest/client/sliding_sync/test_extensions.py +++ b/tests/rest/client/sliding_sync/test_extensions.py @@ -12,8 +12,10 @@ # . # import logging +from typing import Literal from parameterized import parameterized +from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -48,11 +50,16 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase): self.account_data_handler = hs.get_account_data_handler() # Any extensions that use `lists`/`rooms` should be tested here - @parameterized.expand([("account_data",), ("receipts",)]) - def test_extensions_lists_rooms_relevant_rooms(self, extension_name: str) -> None: + @parameterized.expand([("account_data",), ("receipts",), ("typing",)]) + def test_extensions_lists_rooms_relevant_rooms( + self, + extension_name: Literal["account_data", "receipts", "typing"], + ) -> None: """ With various extensions, test out requesting different variations of `lists`/`rooms`. + + Stresses `SlidingSyncHandler.find_relevant_room_ids_for_extension(...)` """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -95,8 +102,17 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase): access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) + elif extension_name == "typing": + # Start a typing notification + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/typing/{user1_id}", + b'{"typing": true, "timeout": 30000}', + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) else: - raise AssertionError(f"Unknown extension name: {extension_name}") + assert_never(extension_name) main_sync_body = { "lists": { From e3db7b2d81cabc7e4335afc051e28678e3a9dd02 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 5 Aug 2024 13:20:15 -0500 Subject: [PATCH 092/332] Sliding Sync: Easier to understand timeline assertions in tests (#17511) Added `_assertTimelineEqual(...)` because I got fed up trying to understand the crazy diffs from the standard `self.assertEqual(...)`/`self.assertListEqual(...)` Before: ``` [FAIL] Traceback (most recent call last): File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 103, in test_rooms_limited_initial_sync self.assertListEqual( File "/usr/lib/python3.12/unittest/case.py", line 1091, in assertListEqual self.assertSequenceEqual(list1, list2, msg, seq_type=list) File "/usr/lib/python3.12/unittest/case.py", line 1073, in assertSequenceEqual self.fail(msg) twisted.trial.unittest.FailTest: Lists differ: ['$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0w[95 chars]isM'] != ['$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4[95 chars]nnU'] First differing element 0: '$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA' '$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E' - ['$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA', - '$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E', ? ^ + ['$8N1XJ7e-3K_wxAanLVD3v8KQ96_B5Xj4huGkgy4N4-E', ? 
^ - '$q4PRxQ_pBZkQI1keYuZPTtExQ23DqpUI3-Lxwfj_isM'] + '$4QcmnzhdazSnDYcYSZCS_6-MWSzM_dN3RC7TRvW0wWA', + '$j3Xj-t2F1wH9kUHsI8X5yqS7hkdSyN2owaArfvk8nnU'] ``` After: ``` [FAIL] Traceback (most recent call last): File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 178, in test_rooms_limited_initial_sync self._assertTimelineEqual( File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 110, in _assertTimelineEqual self._assertListEqual( File "/home/eric/Documents/github/element/synapse/tests/rest/client/sliding_sync/test_rooms_timeline.py", line 79, in _assertListEqual self.fail(f"{diff_message}\n{message}") twisted.trial.unittest.FailTest: Items must Expected items to be in actual ('?' = missing expected items): [ (10, master) $w-BoqW1PQQFU4TzVJW5OIelugxh0mY12wrfw6mbC6D4 (m.room.message) activity4 (11, master) $sSidTZf1EOQmCVDU4mrH_1-bopMQhwcDUO2IhoemR6M (m.room.message) activity5 ? (12, master) $bgOcc3D-2QSkbk4aBxKVyOOQJGs7ZuncRJwG3cEANZg (m.room.member, @user1:test) join ] Actual ('+' = found expected items): [ + (11, master) $sSidTZf1EOQmCVDU4mrH_1-bopMQhwcDUO2IhoemR6M (m.room.message) activity5 + (10, master) $w-BoqW1PQQFU4TzVJW5OIelugxh0mY12wrfw6mbC6D4 (m.room.message) activity4 (9, master) $FmCNyc11YeFwiJ4an7_q6H0LCCjQOKd6UCr5VKeXXUw (m.room.message, None) activity3 ] ``` --- changelog.d/17511.misc | 1 + .../sliding_sync/test_rooms_timeline.py | 124 +++++++++++++++--- 2 files changed, 104 insertions(+), 21 deletions(-) create mode 100644 changelog.d/17511.misc diff --git a/changelog.d/17511.misc b/changelog.d/17511.misc new file mode 100644 index 0000000000..abc7be44ba --- /dev/null +++ b/changelog.d/17511.misc @@ -0,0 +1 @@ +Easier to understand `timeline` assertions in Sliding Sync tests. diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index 84a1e0d223..2e9586ca73 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -12,13 +12,14 @@ # . # import logging +from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.types import StreamToken +from synapse.types import StreamToken, StrSequence from synapse.util import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -42,6 +43,82 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + def _assertListEqual( + self, + actual_items: StrSequence, + expected_items: StrSequence, + message: Optional[str] = None, + ) -> None: + """ + Like `self.assertListEqual(...)` but with an actually understandable diff message. + """ + + if actual_items == expected_items: + return + + expected_lines: List[str] = [] + for expected_item in expected_items: + is_expected_in_actual = expected_item in actual_items + expected_lines.append( + "{} {}".format(" " if is_expected_in_actual else "?", expected_item) + ) + + actual_lines: List[str] = [] + for actual_item in actual_items: + is_actual_in_expected = actual_item in expected_items + actual_lines.append( + "{} {}".format("+" if is_actual_in_expected else " ", actual_item) + ) + + newline = "\n" + expected_string = f"Expected items to be in actual ('?' 
= missing expected items):\n [\n{newline.join(expected_lines)}\n ]" + actual_string = f"Actual ('+' = found expected items):\n [\n{newline.join(actual_lines)}\n ]" + first_message = "Items must" + diff_message = f"{first_message}\n{expected_string}\n{actual_string}" + + self.fail(f"{diff_message}\n{message}") + + def _assertTimelineEqual( + self, + *, + room_id: str, + actual_event_ids: List[str], + expected_event_ids: List[str], + message: Optional[str] = None, + ) -> None: + """ + Like `self.assertListEqual(...)` for event IDs in a room but will give a nicer + output with context for what each event_id is (type, stream_ordering, content, + etc). + """ + if actual_event_ids == expected_event_ids: + return + + event_id_set = set(actual_event_ids + expected_event_ids) + events = self.get_success(self.store.get_events(event_id_set)) + + def event_id_to_string(event_id: str) -> str: + event = events.get(event_id) + if event: + state_key = event.get_state_key() + state_key_piece = f", {state_key}" if state_key is not None else "" + return ( + f"({event.internal_metadata.stream_ordering: >2}, {event.internal_metadata.instance_name}) " + + f"{event.event_id} ({event.type}{state_key_piece}) {event.content.get('membership', '')}{event.content.get('body', '')}" + ) + + return f"{event_id} " + + self._assertListEqual( + actual_items=[ + event_id_to_string(event_id) for event_id in actual_event_ids + ], + expected_items=[ + event_id_to_string(event_id) for event_id in expected_event_ids + ], + message=message, + ) + def test_rooms_limited_initial_sync(self) -> None: """ Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit` @@ -85,17 +162,18 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): response_body["rooms"][room_id1], ) # Check to make sure the latest events are returned - self.assertEqual( - [ + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[ event["event_id"] for event in response_body["rooms"][room_id1]["timeline"] ], - [ + expected_event_ids=[ event_response4["event_id"], event_response5["event_id"], user1_join_response["event_id"], ], - response_body["rooms"][room_id1]["timeline"], + message=str(response_body["rooms"][room_id1]["timeline"]), ) # Check to make sure the `prev_batch` points at the right place @@ -227,16 +305,17 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): + str(response_body["rooms"][room_id1]), ) # Check to make sure the latest events are returned - self.assertEqual( - [ + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[ event["event_id"] for event in response_body["rooms"][room_id1]["timeline"] ], - [ + expected_event_ids=[ event_response2["event_id"], event_response3["event_id"], ], - response_body["rooms"][room_id1]["timeline"], + message=str(response_body["rooms"][room_id1]["timeline"]), ) # All events are "live" @@ -303,18 +382,19 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): + str(response_body["rooms"][room_id1]), ) # Check to make sure that the "live" and historical events are returned - self.assertEqual( - [ + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[ event["event_id"] for event in response_body["rooms"][room_id1]["timeline"] ], - [ + expected_event_ids=[ event_response2["event_id"], user1_join_response["event_id"], event_response3["event_id"], event_response4["event_id"], ], - response_body["rooms"][room_id1]["timeline"], + message=str(response_body["rooms"][room_id1]["timeline"]), ) # Only events after the `from_token` are "live" 
(join, event3, event4) @@ -361,17 +441,18 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # We should see events before the ban but not after - self.assertEqual( - [ + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[ event["event_id"] for event in response_body["rooms"][room_id1]["timeline"] ], - [ + expected_event_ids=[ event_response3["event_id"], event_response4["event_id"], user1_ban_response["event_id"], ], - response_body["rooms"][room_id1]["timeline"], + message=str(response_body["rooms"][room_id1]["timeline"]), ) # No "live" events in an initial sync (no `from_token` to define the "live" # range) @@ -428,17 +509,18 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # We should see events before the ban but not after - self.assertEqual( - [ + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[ event["event_id"] for event in response_body["rooms"][room_id1]["timeline"] ], - [ + expected_event_ids=[ event_response3["event_id"], event_response4["event_id"], user1_ban_response["event_id"], ], - response_body["rooms"][room_id1]["timeline"], + message=str(response_body["rooms"][room_id1]["timeline"]), ) # All live events in the incremental sync self.assertEqual( From c270355349d589e5e8634f0ca8049a16ec7ea4a3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Aug 2024 10:39:11 +0100 Subject: [PATCH 093/332] SS: Reset connection if token is unrecognized (#17529) This triggers the client to start a new sliding sync connection. If we don't do this and the client asks for the full range of rooms, we end up sending down all rooms and their state from scratch (which can be very slow) This causes things like https://github.com/element-hq/element-x-ios/issues/3115 after we restart the server --------- Co-authored-by: Eric Eastwood --- changelog.d/17529.misc | 1 + synapse/api/errors.py | 18 ++++++++++++ synapse/handlers/sliding_sync.py | 27 ++++++++++++++++++ .../sliding_sync/test_rooms_required_state.py | 28 ++++++++----------- 4 files changed, 57 insertions(+), 17 deletions(-) create mode 100644 changelog.d/17529.misc diff --git a/changelog.d/17529.misc b/changelog.d/17529.misc new file mode 100644 index 0000000000..37b2ee07a4 --- /dev/null +++ b/changelog.d/17529.misc @@ -0,0 +1 @@ +Reset the sliding sync connection if we don't recognize the per-connection state position. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index dd4a1ae706..99fc7eab54 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -128,6 +128,10 @@ class Codes(str, Enum): # MSC2677 DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" + # MSC3575 we are telling the client they need to reset their sliding sync + # connection. + UNKNOWN_POS = "M_UNKNOWN_POS" + class CodeMessageException(RuntimeError): """An exception with integer code, a message string attributes and optional headers. @@ -847,3 +851,17 @@ class PartialStateConflictError(SynapseError): msg=PartialStateConflictError.message(), errcode=Codes.UNKNOWN, ) + + +class SlidingSyncUnknownPosition(SynapseError): + """An error that Synapse can return to signal to the client to expire their + sliding sync connection (i.e. send a new request without a `?since=` + param). 
+ """ + + def __init__(self) -> None: + super().__init__( + HTTPStatus.BAD_REQUEST, + msg="Unknown position", + errcode=Codes.UNKNOWN_POS, + ) diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 8467766518..1936471345 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -47,6 +47,7 @@ from synapse.api.constants import ( EventTypes, Membership, ) +from synapse.api.errors import SlidingSyncUnknownPosition from synapse.events import EventBase, StrippedStateEvent from synapse.events.utils import parse_stripped_state_event, strip_event from synapse.handlers.relations import BundledAggregations @@ -491,6 +492,22 @@ class SlidingSyncHandler: # See https://github.com/matrix-org/matrix-doc/issues/1144 raise NotImplementedError() + if from_token: + # Check that we recognize the connection position, if not tell the + # clients that they need to start again. + # + # If we don't do this and the client asks for the full range of + # rooms, we end up sending down all rooms and their state from + # scratch (which can be very slow). By expiring the connection we + # allow the client a chance to do an initial request with a smaller + # range of rooms to get them some results sooner but will end up + # taking the same amount of time (more with round-trips and + # re-processing) in the end to get everything again. + if not await self.connection_store.is_valid_token( + sync_config, from_token.connection_position + ): + raise SlidingSyncUnknownPosition() + await self.connection_store.mark_token_seen( sync_config=sync_config, from_token=from_token, @@ -2821,6 +2838,16 @@ class SlidingSyncConnectionStore: attr.Factory(dict) ) + async def is_valid_token( + self, sync_config: SlidingSyncConfig, connection_token: int + ) -> bool: + """Return whether the connection token is valid/recognized""" + if connection_token == 0: + return True + + conn_key = self._get_connection_key(sync_config) + return connection_token in self._connections.get(conn_key, {}) + async def have_sent_room( self, sync_config: SlidingSyncConfig, connection_token: int, room_id: str ) -> HaveSentRoom: diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 03e36914ae..a13cad223f 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -161,10 +161,10 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - def test_rooms_required_state_incremental_sync_restart(self) -> None: + def test_rooms_incremental_sync_restart(self) -> None: """ - Test `rooms.required_state` returns requested state events in the room during an - incremental sync, after a restart (and so the in memory caches are reset). 
+        Test that, after a restart (and so the in-memory caches are reset), we
+        correctly return an `M_UNKNOWN_POS` error.
         """
         user1_id = self.register_user("user1", "pass")
@@ -195,22 +195,16 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
         self.hs.get_sliding_sync_handler().connection_store._connections.clear()

         # Make the Sliding Sync request
-        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
-
-        # If the cache has been cleared then we do expect the state to come down
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
+        channel = self.make_request(
+            method="POST",
+            path=self.sync_endpoint + f"?pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
         )
-
-        self._assertRequiredStateIncludes(
-            response_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.RoomHistoryVisibility, "")],
-            },
-            exact=True,
+        self.assertEqual(channel.code, 400, channel.json_body)
+        self.assertEqual(
+            channel.json_body["errcode"], "M_UNKNOWN_POS", channel.json_body
         )
-        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))

     def test_rooms_required_state_wildcard(self) -> None:
         """

From 23727869c712f7c73a449e68ad2d9445bacde857 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Aug 2024 11:45:44 +0100
Subject: [PATCH 094/332] Bump serde_json from 1.0.121 to 1.0.122 (#17525)

---
 Cargo.lock | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 333499e197..d50ce87d17 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -505,9 +505,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.121"
+version = "1.0.122"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609"
+checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
 dependencies = [
  "itoa",
  "memchr",

From d845e939a9ef0947765dae33ef5a8b3f6f7e4375 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Aug 2024 11:46:48 +0100
Subject: [PATCH 095/332] Bump black from 24.4.2 to 24.8.0 (#17522)

---
 poetry.lock | 50 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 7d8334515a..d0953eb5c9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

 [[package]]
 name = "annotated-types"
@@ -107,33 +107,33 @@ typecheck = ["mypy"]

 [[package]]
 name = "black"
-version = "24.4.2"
+version = "24.8.0"
 description = "The uncompromising code formatter."
optional = false python-versions = ">=3.8" files = [ - {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, - {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, - {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, - {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, - {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, - {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, - {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, - {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, - {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, - {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, - {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, - {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, - {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, - {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, - {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, - {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, - {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, - {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, - {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, - {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, - {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, - {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, + {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, + {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, + {file = 
"black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, + {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, + {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, + {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, + {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, + {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, + {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, + {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, + {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, + {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, + {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, + {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, + {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, + {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, + {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, + {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, + {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, + {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, + {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, + {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, ] [package.dependencies] @@ -3196,4 +3196,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "5f458ce53b7469844af2e0c5a9c5ef720736de5f080c4eb8d3a0e60286424f44" +content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93" From 5d8446298cceb01a28ec4762a59001709eeb0c90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:47:06 +0100 Subject: [PATCH 096/332] Bump towncrier from 23.11.0 to 24.7.1 (#17523) --- 
poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index d0953eb5c9..dcacaaf067 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2649,24 +2649,24 @@ files = [ [[package]] name = "towncrier" -version = "23.11.0" +version = "24.7.1" description = "Building newsfiles for your project." optional = false python-versions = ">=3.8" files = [ - {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"}, - {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"}, + {file = "towncrier-24.7.1-py3-none-any.whl", hash = "sha256:685e2a94335b5dc47537b4d3b449a25b18571ea85b07dcf6e8df31ba40f692dd"}, + {file = "towncrier-24.7.1.tar.gz", hash = "sha256:57a057faedabcadf1a62f6f9bad726ae566c1f31a411338ddb8316993f583b3d"}, ] [package.dependencies] click = "*" +importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""} -incremental = "*" jinja2 = "*" tomli = {version = "*", markers = "python_version < \"3.11\""} [package.extras] -dev = ["furo", "packaging", "sphinx (>=5)", "twisted"] +dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"] [[package]] name = "treq" From 2dad7182652f77fe82727729538df244f7833095 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:47:19 +0100 Subject: [PATCH 097/332] Bump phonenumbers from 8.13.39 to 8.13.42 (#17521) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index dcacaaf067..278bd6cb6e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1516,13 +1516,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.39" +version = "8.13.42" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"}, - {file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"}, + {file = "phonenumbers-8.13.42-py2.py3-none-any.whl", hash = "sha256:18acc22ee03116d27b26e990f53806a1770a3e05f05e1620bc09ad187f889456"}, + {file = "phonenumbers-8.13.42.tar.gz", hash = "sha256:7137904f2db3b991701e853174ce8e1cb8f540b8bfdf27617540de04c0b7bed5"}, ] [[package]] From 932cb0a92838918fc38c233be3797adf43b8893d Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 6 Aug 2024 12:24:47 +0100 Subject: [PATCH 098/332] 1.113.0rc1 --- CHANGES.md | 44 +++++++++++++++++++++++++++++++++++++++ changelog.d/17447.feature | 1 - changelog.d/17450.bugfix | 1 - changelog.d/17452.misc | 1 - changelog.d/17476.doc | 1 - changelog.d/17477.feature | 1 - changelog.d/17478.misc | 1 - changelog.d/17479.misc | 1 - changelog.d/17481.misc | 1 - changelog.d/17482.misc | 1 - changelog.d/17489.feature | 1 - changelog.d/17499.bugfix | 1 - changelog.d/17501.misc | 1 - changelog.d/17504.misc | 1 - changelog.d/17505.feature | 1 - changelog.d/17507.misc | 1 - changelog.d/17511.misc | 1 - changelog.d/17529.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 20 files changed, 51 insertions(+), 18 deletions(-) delete mode 100644 changelog.d/17447.feature delete mode 100644 changelog.d/17450.bugfix delete mode 100644 changelog.d/17452.misc delete mode 100644 changelog.d/17476.doc delete mode 100644 changelog.d/17477.feature delete mode 100644 changelog.d/17478.misc delete mode 100644 changelog.d/17479.misc delete mode 100644 changelog.d/17481.misc delete mode 100644 changelog.d/17482.misc delete mode 100644 changelog.d/17489.feature delete mode 100644 changelog.d/17499.bugfix delete mode 100644 changelog.d/17501.misc delete mode 100644 changelog.d/17504.misc delete mode 100644 changelog.d/17505.feature delete mode 100644 changelog.d/17507.misc delete mode 100644 changelog.d/17511.misc delete mode 100644 changelog.d/17529.misc diff --git a/CHANGES.md b/CHANGES.md index b4fddc3e5c..c40c11f98c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,47 @@ +# Synapse 1.113.0rc1 (2024-08-06) + +### Features + +- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447)) +- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477)) +- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489)) +- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505)) + +### Bugfixes + +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. 
([\#17450](https://github.com/element-hq/synapse/issues/17450)) +- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499)) + +### Improved Documentation + +- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476)) + +### Internal Changes + +- Change sliding sync to use their own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452)) +- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478)) +- Do not send down empty room entries down experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479)) +- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482)) +- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501)) +- Split and move Sliding Sync tests so we have some more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504)) +- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507)) +- Easier to understand `timeline` assertions in Sliding Sync tests. ([\#17511](https://github.com/element-hq/synapse/issues/17511)) +- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529)) + + + +### Updates to locked dependencies + +* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495)) +* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522)) +* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521)) +* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494)) +* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493)) +* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525)) +* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523)) +* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496)) +* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497)) + # Synapse 1.112.0 (2024-07-30) This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). 
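The connection-reset entry above ([\#17529]) pairs with the `M_UNKNOWN_POS` error added in patch 093: when the server no longer recognizes a sliding sync connection position, it responds with HTTP 400 and that error code, and the client is expected to restart the connection from scratch. A minimal client-side sketch of that handling, assuming the MSC3575 unstable endpoint path and using `requests` purely as an illustrative HTTP client (nothing below is part of these patches):

```python
from typing import Optional

import requests  # illustrative HTTP client, not a Synapse dependency

# Assumed MSC3575 unstable path; adjust if the spec/implementation differs.
SYNC_PATH = "/_matrix/client/unstable/org.matrix.msc3575/sync"


def sliding_sync(
    base_url: str, access_token: str, body: dict, pos: Optional[str] = None
) -> dict:
    """Perform one sliding sync request, restarting the connection on M_UNKNOWN_POS."""
    params = {"timeout": "30000"}
    if pos is not None:
        params["pos"] = pos
    resp = requests.post(
        base_url + SYNC_PATH,
        params=params,
        json=body,
        headers={"Authorization": f"Bearer {access_token}"},
    )
    if resp.status_code == 400 and resp.json().get("errcode") == "M_UNKNOWN_POS":
        # The server no longer recognizes our connection position: drop the
        # stored `pos` and retry as an initial sync.
        return sliding_sync(base_url, access_token, body, pos=None)
    resp.raise_for_status()
    return resp.json()
```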
diff --git a/changelog.d/17447.feature b/changelog.d/17447.feature deleted file mode 100644 index 6f80e298ae..0000000000 --- a/changelog.d/17447.feature +++ /dev/null @@ -1 +0,0 @@ -Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17450.bugfix b/changelog.d/17450.bugfix deleted file mode 100644 index 01a521da38..0000000000 --- a/changelog.d/17450.bugfix +++ /dev/null @@ -1 +0,0 @@ -Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. diff --git a/changelog.d/17452.misc b/changelog.d/17452.misc deleted file mode 100644 index 4fd07f617b..0000000000 --- a/changelog.d/17452.misc +++ /dev/null @@ -1 +0,0 @@ -Change sliding sync to use their own token format in preparation for storing per-connection state. diff --git a/changelog.d/17476.doc b/changelog.d/17476.doc deleted file mode 100644 index 89d8d490bb..0000000000 --- a/changelog.d/17476.doc +++ /dev/null @@ -1 +0,0 @@ -Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. diff --git a/changelog.d/17477.feature b/changelog.d/17477.feature deleted file mode 100644 index 9785a2ef7b..0000000000 --- a/changelog.d/17477.feature +++ /dev/null @@ -1 +0,0 @@ -Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17478.misc b/changelog.d/17478.misc deleted file mode 100644 index 5406c82742..0000000000 --- a/changelog.d/17478.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. diff --git a/changelog.d/17479.misc b/changelog.d/17479.misc deleted file mode 100644 index 4502f71662..0000000000 --- a/changelog.d/17479.misc +++ /dev/null @@ -1 +0,0 @@ -Do not send down empty room entries down experimental sliding sync endpoint. diff --git a/changelog.d/17481.misc b/changelog.d/17481.misc deleted file mode 100644 index ac55538424..0000000000 --- a/changelog.d/17481.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. diff --git a/changelog.d/17482.misc b/changelog.d/17482.misc deleted file mode 100644 index ac55538424..0000000000 --- a/changelog.d/17482.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. diff --git a/changelog.d/17489.feature b/changelog.d/17489.feature deleted file mode 100644 index 5ace1e675e..0000000000 --- a/changelog.d/17489.feature +++ /dev/null @@ -1 +0,0 @@ -Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17499.bugfix b/changelog.d/17499.bugfix deleted file mode 100644 index 5cb7b3c30e..0000000000 --- a/changelog.d/17499.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. 
diff --git a/changelog.d/17501.misc b/changelog.d/17501.misc deleted file mode 100644 index ba96472acb..0000000000 --- a/changelog.d/17501.misc +++ /dev/null @@ -1 +0,0 @@ -Add some opentracing tags and logging to the experimental sliding sync implementation. diff --git a/changelog.d/17504.misc b/changelog.d/17504.misc deleted file mode 100644 index 4ab892843d..0000000000 --- a/changelog.d/17504.misc +++ /dev/null @@ -1 +0,0 @@ -Split and move Sliding Sync tests so we have some more sane test file sizes. diff --git a/changelog.d/17505.feature b/changelog.d/17505.feature deleted file mode 100644 index ca0c2bd70f..0000000000 --- a/changelog.d/17505.feature +++ /dev/null @@ -1 +0,0 @@ -Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17507.misc b/changelog.d/17507.misc deleted file mode 100644 index 82c4d263be..0000000000 --- a/changelog.d/17507.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. diff --git a/changelog.d/17511.misc b/changelog.d/17511.misc deleted file mode 100644 index abc7be44ba..0000000000 --- a/changelog.d/17511.misc +++ /dev/null @@ -1 +0,0 @@ -Easier to understand `timeline` assertions in Sliding Sync tests. diff --git a/changelog.d/17529.misc b/changelog.d/17529.misc deleted file mode 100644 index 37b2ee07a4..0000000000 --- a/changelog.d/17529.misc +++ /dev/null @@ -1 +0,0 @@ -Reset the sliding sync connection if we don't recognize the per-connection state position. diff --git a/debian/changelog b/debian/changelog index e35750a35f..e89fdb98dc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium + + * New Synapse release 1.113.0rc1. + + -- Synapse Packaging team Tue, 06 Aug 2024 12:23:23 +0100 + matrix-synapse-py3 (1.112.0) stable; urgency=medium * New Synapse release 1.112.0. diff --git a/pyproject.toml b/pyproject.toml index c8373c6dbc..c29d1534fb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.112.0" +version = "1.113.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 244a255065c8ff7ae9aa6d948ea1f8c707ee943d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Aug 2024 14:26:51 +0200 Subject: [PATCH 099/332] Clarify `auto_accept_invites.worker_to_run_on` config docs (#17515) --- changelog.d/17515.doc | 3 +++ docs/usage/configuration/config_documentation.md | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17515.doc diff --git a/changelog.d/17515.doc b/changelog.d/17515.doc new file mode 100644 index 0000000000..c2dbe24e9d --- /dev/null +++ b/changelog.d/17515.doc @@ -0,0 +1,3 @@ +Clarify default behaviour of the +[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) +option. 
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 40f64be856..567bbf88d2 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4685,7 +4685,9 @@ This setting has the following sub-options: * `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only for direct messages. Defaults to false. * `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false. -* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name". +* `worker_to_run_on`: Which worker to run this module on. This must match + the "worker_name". If not set or `null`, invites will be accepted on the + main process. NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed. The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join From 1dfa59b238cee0dc62163588cc9481896c288979 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Aug 2024 11:43:43 -0500 Subject: [PATCH 100/332] Sliding Sync: Add more tracing (#17514) Spawning from looking at a couple traces and wanting a little more info. Follow-up to github.com/element-hq/synapse/pull/17501 The changes in this PR allow you to find slow Sliding Sync traces ignoring the `wait_for_events` time. In Jaeger, you can now filter for the `current_sync_for_user` operation with `RESULT.result=true` indicating that it actually returned non-empty results. If you want to find traces for your own user, you can use `RESULT.result=true ARG.sync_config.user="@madlittlemods:matrix.org"` --- changelog.d/17514.misc | 1 + synapse/handlers/sliding_sync.py | 576 ++++++++++-------- synapse/rest/client/sync.py | 9 + synapse/storage/databases/main/roommember.py | 2 + .../storage/databases/main/state_deltas.py | 2 + synapse/storage/databases/main/stream.py | 6 +- 6 files changed, 351 insertions(+), 245 deletions(-) create mode 100644 changelog.d/17514.misc diff --git a/changelog.d/17514.misc b/changelog.d/17514.misc new file mode 100644 index 0000000000..fc3cc37915 --- /dev/null +++ b/changelog.d/17514.misc @@ -0,0 +1 @@ +Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
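The `synapse/handlers/sliding_sync.py` diff below is dominated by re-indentation: existing logic moves under named `start_active_span` blocks, and the final result is tagged on the active span. Reduced to its shape, the pattern is roughly the following sketch (not the patched code itself; the empty `lists` dict stands in for the real list-assembly logic):

```python
from synapse.logging.opentracing import SynapseTags, set_tag, start_active_span


async def current_sync_for_user_sketch(user_id: str) -> dict:
    # Each phase runs under a named child span, so it shows up in Jaeger as
    # its own operation with its own timing.
    with start_active_span("assemble_sliding_window_lists"):
        lists: dict = {}  # stand-in for the real list-assembly logic

    result = {"lists": lists}  # stand-in for the assembled SlidingSyncResult

    # Tag the current span so Jaeger queries such as `RESULT.result=true`
    # match only syncs that returned data, and `ARG.sync_config.user=...`
    # narrows the search to a single user's traces.
    set_tag(SynapseTags.RESULT_PREFIX + "result", bool(result["lists"]))
    set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id)
    return result
```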
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 1936471345..1db96ad41c 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -51,7 +51,14 @@ from synapse.api.errors import SlidingSyncUnknownPosition from synapse.events import EventBase, StrippedStateEvent from synapse.events.utils import parse_stripped_state_event, strip_event from synapse.handlers.relations import BundledAggregations -from synapse.logging.opentracing import log_kv, start_active_span, tag_args, trace +from synapse.logging.opentracing import ( + SynapseTags, + log_kv, + set_tag, + start_active_span, + tag_args, + trace, +) from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, @@ -534,125 +541,144 @@ class SlidingSyncHandler: # Keep track of the rooms that we can display and need to fetch more info about relevant_room_map: Dict[str, RoomSyncConfig] = {} if has_lists and sync_config.lists is not None: - sync_room_map = await self.filter_rooms_relevant_for_sync( - user=sync_config.user, - room_membership_for_user_map=room_membership_for_user_map, - ) - - for list_key, list_config in sync_config.lists.items(): - # Apply filters - filtered_sync_room_map = sync_room_map - if list_config.filters is not None: - filtered_sync_room_map = await self.filter_rooms( - sync_config.user, sync_room_map, list_config.filters, to_token - ) - - # Sort the list - sorted_room_info = await self.sort_rooms( - filtered_sync_room_map, to_token + with start_active_span("assemble_sliding_window_lists"): + sync_room_map = await self.filter_rooms_relevant_for_sync( + user=sync_config.user, + room_membership_for_user_map=room_membership_for_user_map, ) - # Find which rooms are partially stated and may need to be filtered out - # depending on the `required_state` requested (see below). - partial_state_room_map = await self.store.is_partial_state_room_batched( - filtered_sync_room_map.keys() - ) - - # Since creating the `RoomSyncConfig` takes some work, let's just do it - # once and make a copy whenever we need it. - room_sync_config = RoomSyncConfig.from_room_config(list_config) - membership_state_keys = room_sync_config.required_state_map.get( - EventTypes.Member - ) - # Also see `StateFilter.must_await_full_state(...)` for comparison - lazy_loading = ( - membership_state_keys is not None - and StateValues.LAZY in membership_state_keys - ) - - ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] - if list_config.ranges: - for range in list_config.ranges: - room_ids_in_list: List[str] = [] - - # We're going to loop through the sorted list of rooms starting - # at the range start index and keep adding rooms until we fill - # up the range or run out of rooms. - # - # Both sides of range are inclusive so we `+ 1` - max_num_rooms = range[1] - range[0] + 1 - for room_membership in sorted_room_info[range[0] :]: - room_id = room_membership.room_id - - if len(room_ids_in_list) >= max_num_rooms: - break - - # Exclude partially-stated rooms unless the `required_state` - # only has `["m.room.member", "$LAZY"]` for membership - # (lazy-loading room members). - if partial_state_room_map.get(room_id) and not lazy_loading: - continue - - # Take the superset of the `RoomSyncConfig` for each room. - # - # Update our `relevant_room_map` with the room we're going - # to display and need to fetch more info about. 
- existing_room_sync_config = relevant_room_map.get(room_id) - if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( - room_sync_config - ) - else: - # Make a copy so if we modify it later, it doesn't - # affect all references. - relevant_room_map[room_id] = ( - room_sync_config.deep_copy() - ) - - room_ids_in_list.append(room_id) - - ops.append( - SlidingSyncResult.SlidingWindowList.Operation( - op=OperationType.SYNC, - range=range, - room_ids=room_ids_in_list, - ) + for list_key, list_config in sync_config.lists.items(): + # Apply filters + filtered_sync_room_map = sync_room_map + if list_config.filters is not None: + filtered_sync_room_map = await self.filter_rooms( + sync_config.user, + sync_room_map, + list_config.filters, + to_token, ) - lists[list_key] = SlidingSyncResult.SlidingWindowList( - count=len(sorted_room_info), - ops=ops, - ) + # Sort the list + sorted_room_info = await self.sort_rooms( + filtered_sync_room_map, to_token + ) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_room_map = ( + await self.store.is_partial_state_room_batched( + filtered_sync_room_map.keys() + ) + ) + + # Since creating the `RoomSyncConfig` takes some work, let's just do it + # once and make a copy whenever we need it. + room_sync_config = RoomSyncConfig.from_room_config(list_config) + membership_state_keys = room_sync_config.required_state_map.get( + EventTypes.Member + ) + # Also see `StateFilter.must_await_full_state(...)` for comparison + lazy_loading = ( + membership_state_keys is not None + and StateValues.LAZY in membership_state_keys + ) + + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + if list_config.ranges: + for range in list_config.ranges: + room_ids_in_list: List[str] = [] + + # We're going to loop through the sorted list of rooms starting + # at the range start index and keep adding rooms until we fill + # up the range or run out of rooms. + # + # Both sides of range are inclusive so we `+ 1` + max_num_rooms = range[1] - range[0] + 1 + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + + if len(room_ids_in_list) >= max_num_rooms: + break + + # Exclude partially-stated rooms unless the `required_state` + # only has `["m.room.member", "$LAZY"]` for membership + # (lazy-loading room members). + if ( + partial_state_room_map.get(room_id) + and not lazy_loading + ): + continue + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going + # to display and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get( + room_id + ) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + else: + # Make a copy so if we modify it later, it doesn't + # affect all references. 
+ relevant_room_map[room_id] = ( + room_sync_config.deep_copy() + ) + + room_ids_in_list.append(room_id) + + ops.append( + SlidingSyncResult.SlidingWindowList.Operation( + op=OperationType.SYNC, + range=range, + room_ids=room_ids_in_list, + ) + ) + + lists[list_key] = SlidingSyncResult.SlidingWindowList( + count=len(sorted_room_info), + ops=ops, + ) # Handle room subscriptions if has_room_subscriptions and sync_config.room_subscriptions is not None: - for room_id, room_subscription in sync_config.room_subscriptions.items(): - room_membership_for_user_at_to_token = ( - await self.check_room_subscription_allowed_for_user( - room_id=room_id, - room_membership_for_user_map=room_membership_for_user_map, - to_token=to_token, + with start_active_span("assemble_room_subscriptions"): + for ( + room_id, + room_subscription, + ) in sync_config.room_subscriptions.items(): + room_membership_for_user_at_to_token = ( + await self.check_room_subscription_allowed_for_user( + room_id=room_id, + room_membership_for_user_map=room_membership_for_user_map, + to_token=to_token, + ) ) - ) - # Skip this room if the user isn't allowed to see it - if not room_membership_for_user_at_to_token: - continue + # Skip this room if the user isn't allowed to see it + if not room_membership_for_user_at_to_token: + continue - room_membership_for_user_map[room_id] = ( - room_membership_for_user_at_to_token - ) + room_membership_for_user_map[room_id] = ( + room_membership_for_user_at_to_token + ) - # Take the superset of the `RoomSyncConfig` for each room. - # - # Update our `relevant_room_map` with the room we're going to display - # and need to fetch more info about. - room_sync_config = RoomSyncConfig.from_room_config(room_subscription) - existing_room_sync_config = relevant_room_map.get(room_id) - if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config(room_sync_config) - else: - relevant_room_map[room_id] = room_sync_config + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. + room_sync_config = RoomSyncConfig.from_room_config( + room_subscription + ) + existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + else: + relevant_room_map[room_id] = room_sync_config # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} @@ -661,48 +687,49 @@ class SlidingSyncHandler: # previously. # Keep track of the rooms that we're going to display and need to fetch more info about relevant_rooms_to_send_map = relevant_room_map - if from_token: - rooms_should_send = set() + with start_active_span("filter_relevant_rooms_to_send"): + if from_token: + rooms_should_send = set() - # First we check if there are rooms that match a list/room - # subscription and have updates we need to send (i.e. either because - # we haven't sent the room down, or we have but there are missing - # updates). - for room_id in relevant_room_map: - status = await self.connection_store.have_sent_room( - sync_config, - from_token.connection_position, - room_id, + # First we check if there are rooms that match a list/room + # subscription and have updates we need to send (i.e. either because + # we haven't sent the room down, or we have but there are missing + # updates). 
+ for room_id in relevant_room_map: + status = await self.connection_store.have_sent_room( + sync_config, + from_token.connection_position, + room_id, + ) + if ( + # The room was never sent down before so the client needs to know + # about it regardless of any updates. + status.status == HaveSentRoomFlag.NEVER + # `PREVIOUSLY` literally means the "room was sent down before *AND* + # there are updates we haven't sent down" so we already know this + # room has updates. + or status.status == HaveSentRoomFlag.PREVIOUSLY + ): + rooms_should_send.add(room_id) + elif status.status == HaveSentRoomFlag.LIVE: + # We know that we've sent all updates up until `from_token`, + # so we just need to check if there have been updates since + # then. + pass + else: + assert_never(status.status) + + # We only need to check for new events since any state changes + # will also come down as new events. + rooms_that_have_updates = self.store.get_rooms_that_might_have_updates( + relevant_room_map.keys(), from_token.stream_token.room_key ) - if ( - # The room was never sent down before so the client needs to know - # about it regardless of any updates. - status.status == HaveSentRoomFlag.NEVER - # `PREVIOUSLY` literally means the "room was sent down before *AND* - # there are updates we haven't sent down" so we already know this - # room has updates. - or status.status == HaveSentRoomFlag.PREVIOUSLY - ): - rooms_should_send.add(room_id) - elif status.status == HaveSentRoomFlag.LIVE: - # We know that we've sent all updates up until `from_token`, - # so we just need to check if there have been updates since - # then. - pass - else: - assert_never(status.status) - - # We only need to check for new events since any state changes - # will also come down as new events. - rooms_that_have_updates = self.store.get_rooms_that_might_have_updates( - relevant_room_map.keys(), from_token.stream_token.room_key - ) - rooms_should_send.update(rooms_that_have_updates) - relevant_rooms_to_send_map = { - room_id: room_sync_config - for room_id, room_sync_config in relevant_room_map.items() - if room_id in rooms_should_send - } + rooms_should_send.update(rooms_that_have_updates) + relevant_rooms_to_send_map = { + room_id: room_sync_config + for room_id, room_sync_config in relevant_room_map.items() + if room_id in rooms_should_send + } @trace @tag_args @@ -754,13 +781,20 @@ class SlidingSyncHandler: # Initial sync without a `from_token` starts at `0` connection_position = 0 - return SlidingSyncResult( + sliding_sync_result = SlidingSyncResult( next_pos=SlidingSyncStreamToken(to_token, connection_position), lists=lists, rooms=rooms, extensions=extensions, ) + # Make it easy to find traces for syncs that aren't empty + set_tag(SynapseTags.RESULT_PREFIX + "result", bool(sliding_sync_result)) + set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id) + + return sliding_sync_result + + @trace async def get_room_membership_for_user_at_to_token( self, user: UserID, @@ -1099,6 +1133,7 @@ class SlidingSyncHandler: return sync_room_id_set + @trace async def filter_rooms_relevant_for_sync( self, user: UserID, @@ -1209,6 +1244,7 @@ class SlidingSyncHandler: # return None + @trace async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( self, room_ids: StrCollection, @@ -1299,6 +1335,7 @@ class SlidingSyncHandler: return room_id_to_stripped_state_map + @trace async def _bulk_get_partial_current_state_content_for_rooms( self, content_type: Literal[ @@ -1498,125 +1535,132 @@ class SlidingSyncHandler: # Filter for Direct-Message (DM) 
rooms if filters.is_dm is not None: - if filters.is_dm: - # Only DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if sync_room_map[room_id].is_dm - } - else: - # Only non-DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if not sync_room_map[room_id].is_dm - } + with start_active_span("filters.is_dm"): + if filters.is_dm: + # Only DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if sync_room_map[room_id].is_dm + } + else: + # Only non-DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if not sync_room_map[room_id].is_dm + } if filters.spaces is not None: - raise NotImplementedError() + with start_active_span("filters.spaces"): + raise NotImplementedError() # Filter for encrypted rooms if filters.is_encrypted is not None: - room_id_to_encryption = ( - await self._bulk_get_partial_current_state_content_for_rooms( - content_type="room_encryption", - room_ids=filtered_room_id_set, - to_token=to_token, - sync_room_map=sync_room_map, - room_id_to_stripped_state_map=room_id_to_stripped_state_map, + with start_active_span("filters.is_encrypted"): + room_id_to_encryption = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_encryption", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) ) - ) - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - encryption = room_id_to_encryption.get(room_id, ROOM_UNKNOWN_SENTINEL) + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + encryption = room_id_to_encryption.get( + room_id, ROOM_UNKNOWN_SENTINEL + ) - # Just remove rooms if we can't determine their encryption status - if encryption is ROOM_UNKNOWN_SENTINEL: - filtered_room_id_set.remove(room_id) - continue + # Just remove rooms if we can't determine their encryption status + if encryption is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue - # If we're looking for encrypted rooms, filter out rooms that are not - # encrypted and vice versa - is_encrypted = encryption is not None - if (filters.is_encrypted and not is_encrypted) or ( - not filters.is_encrypted and is_encrypted - ): - filtered_room_id_set.remove(room_id) + # If we're looking for encrypted rooms, filter out rooms that are not + # encrypted and vice versa + is_encrypted = encryption is not None + if (filters.is_encrypted and not is_encrypted) or ( + not filters.is_encrypted and is_encrypted + ): + filtered_room_id_set.remove(room_id) # Filter for rooms that the user has been invited to if filters.is_invite is not None: - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - room_for_user = sync_room_map[room_id] - # If we're looking for invite rooms, filter out rooms that the user is - # not invited to and vice versa - if ( - filters.is_invite and room_for_user.membership != Membership.INVITE - ) or ( - not filters.is_invite - and room_for_user.membership == Membership.INVITE - ): - filtered_room_id_set.remove(room_id) + with start_active_span("filters.is_invite"): + # 
Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_for_user = sync_room_map[room_id] + # If we're looking for invite rooms, filter out rooms that the user is + # not invited to and vice versa + if ( + filters.is_invite + and room_for_user.membership != Membership.INVITE + ) or ( + not filters.is_invite + and room_for_user.membership == Membership.INVITE + ): + filtered_room_id_set.remove(room_id) # Filter by room type (space vs room, etc). A room must match one of the types # provided in the list. `None` is a valid type for rooms which do not have a # room type. if filters.room_types is not None or filters.not_room_types is not None: - room_id_to_type = ( - await self._bulk_get_partial_current_state_content_for_rooms( - content_type="room_type", - room_ids=filtered_room_id_set, - to_token=to_token, - sync_room_map=sync_room_map, - room_id_to_stripped_state_map=room_id_to_stripped_state_map, + with start_active_span("filters.room_types"): + room_id_to_type = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_type", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) ) - ) - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) - # Just remove rooms if we can't determine their type - if room_type is ROOM_UNKNOWN_SENTINEL: - filtered_room_id_set.remove(room_id) - continue + # Just remove rooms if we can't determine their type + if room_type is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue - if ( - filters.room_types is not None - and room_type not in filters.room_types - ): - filtered_room_id_set.remove(room_id) + if ( + filters.room_types is not None + and room_type not in filters.room_types + ): + filtered_room_id_set.remove(room_id) - if ( - filters.not_room_types is not None - and room_type in filters.not_room_types - ): - filtered_room_id_set.remove(room_id) + if ( + filters.not_room_types is not None + and room_type in filters.not_room_types + ): + filtered_room_id_set.remove(room_id) if filters.room_name_like is not None: - # TODO: The room name is a bit more sensitive to leak than the - # create/encryption event. Maybe we should consider a better way to fetch - # historical state before implementing this. - # - # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( - # content_type="room_name", - # room_ids=filtered_room_id_set, - # to_token=to_token, - # sync_room_map=sync_room_map, - # room_id_to_stripped_state_map=room_id_to_stripped_state_map, - # ) - raise NotImplementedError() + with start_active_span("filters.room_name_like"): + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. 
+ # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) + raise NotImplementedError() - if filters.tags is not None: - raise NotImplementedError() - - if filters.not_tags is not None: - raise NotImplementedError() + if filters.tags is not None or filters.not_tags is not None: + with start_active_span("filters.tags"): + raise NotImplementedError() # Assemble a new sync room map but only with the `filtered_room_id_set` return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} @@ -1678,6 +1722,7 @@ class SlidingSyncHandler: reverse=True, ) + @trace async def get_current_state_ids_at( self, room_id: str, @@ -1742,6 +1787,7 @@ class SlidingSyncHandler: return state_ids + @trace async def get_current_state_at( self, room_id: str, @@ -1803,6 +1849,15 @@ class SlidingSyncHandler: """ user = sync_config.user + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "membership", + room_membership_for_user_at_to_token.membership, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "timeline_limit", + room_sync_config.timeline_limit, + ) + # Determine whether we should limit the timeline to the token range. # # We should return historical messages (before token range) in the @@ -2070,6 +2125,10 @@ class SlidingSyncHandler: if StateValues.WILDCARD in room_sync_config.required_state_map.get( StateValues.WILDCARD, set() ): + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard", + True, + ) required_state_filter = StateFilter.all() # TODO: `StateFilter` currently doesn't support wildcard event types. We're # currently working around this by returning all state to the client but it @@ -2079,6 +2138,10 @@ class SlidingSyncHandler: room_sync_config.required_state_map.get(StateValues.WILDCARD) is not None ): + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard_event_type", + True, + ) required_state_filter = StateFilter.all() else: required_state_types: List[Tuple[str, Optional[str]]] = [] @@ -2086,8 +2149,12 @@ class SlidingSyncHandler: state_type, state_key_set, ) in room_sync_config.required_state_map.items(): + num_wild_state_keys = 0 + lazy_load_room_members = False + num_others = 0 for state_key in state_key_set: if state_key == StateValues.WILDCARD: + num_wild_state_keys += 1 # `None` is a wildcard in the `StateFilter` required_state_types.append((state_type, None)) # We need to fetch all relevant people when we're lazy-loading membership @@ -2095,6 +2162,7 @@ class SlidingSyncHandler: state_type == EventTypes.Member and state_key == StateValues.LAZY ): + lazy_load_room_members = True # Everyone in the timeline is relevant timeline_membership: Set[str] = set() if timeline_events is not None: @@ -2109,10 +2177,26 @@ class SlidingSyncHandler: # FIXME: We probably also care about invite, ban, kick, targets, etc # but the spec only mentions "senders". 
elif state_key == StateValues.ME: + num_others += 1 required_state_types.append((state_type, user.to_string())) else: + num_others += 1 required_state_types.append((state_type, state_key)) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + + "required_state_wildcard_state_key_count", + num_wild_state_keys, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy", + lazy_load_room_members, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count", + num_others, + ) + required_state_filter = StateFilter.from_types(required_state_types) # We need this base set of info for the response so let's just fetch it along @@ -2208,6 +2292,8 @@ class SlidingSyncHandler: if new_bump_event_pos.stream > 0: bump_stamp = new_bump_event_pos.stream + set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) + return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, @@ -2863,6 +2949,7 @@ class SlidingSyncConnectionStore: return room_status + @trace async def record_rooms( self, sync_config: SlidingSyncConfig, @@ -2938,6 +3025,7 @@ class SlidingSyncConnectionStore: return new_store_token + @trace async def mark_token_seen( self, sync_config: SlidingSyncConfig, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 4f2c552af2..8c5db2a513 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -899,6 +899,9 @@ class SlidingSyncRestServlet(RestServlet): body = parse_and_validate_json_object_from_request(request, SlidingSyncBody) # Tag and log useful data to differentiate requests. + set_tag( + "sliding_sync.sync_type", "initial" if from_token is None else "incremental" + ) set_tag("sliding_sync.conn_id", body.conn_id or "") log_kv( { @@ -912,6 +915,12 @@ class SlidingSyncRestServlet(RestServlet): "sliding_sync.room_subscriptions": list( (body.room_subscriptions or {}).keys() ), + # We also include the number of room subscriptions because logs are + # limited to 1024 characters and the large room ID list above can be cut + # off. 
+ "sliding_sync.num_room_subscriptions": len( + (body.room_subscriptions or {}).keys() + ), } ) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 640ab123f0..1d9f0f52e1 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -39,6 +39,7 @@ from typing import ( import attr from synapse.api.constants import EventTypes, Membership +from synapse.logging.opentracing import trace from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -422,6 +423,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return invite return None + @trace async def get_rooms_for_local_user_where_membership_is( self, user_id: str, diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index da3ebe66b8..9ed39e688a 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -24,6 +24,7 @@ from typing import List, Optional, Tuple import attr +from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.stream import _filter_results_by_stream @@ -159,6 +160,7 @@ class StateDeltasStore(SQLBaseStore): self._get_max_stream_id_in_current_state_deltas_txn, ) + @trace async def get_current_state_deltas_for_room( self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken ) -> List[StateDelta]: diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 4207e73c7f..95775e3804 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -67,7 +67,7 @@ from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.filtering import Filter from synapse.events import EventBase from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -812,6 +812,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return ret, key + @trace async def get_current_state_delta_membership_changes_for_user( self, user_id: str, @@ -1186,6 +1187,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return None + @trace async def get_last_event_pos_in_room_before_stream_ordering( self, room_id: str, @@ -1940,6 +1942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return rows, next_token @trace + @tag_args async def paginate_room_events( self, room_id: str, @@ -2105,6 +2108,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return None + @trace def get_rooms_that_might_have_updates( self, room_ids: StrCollection, from_token: RoomStreamToken ) -> StrCollection: From ceb3686dcde1f22cd28cee17a0922b6a382563c2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Aug 2024 10:32:36 +0100 Subject: [PATCH 101/332] Fixup sliding sync comment (#17531) c.f. 
https://github.com/element-hq/synapse/pull/17529#discussion_r1705780925 --- changelog.d/17531.misc | 1 + synapse/api/errors.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17531.misc diff --git a/changelog.d/17531.misc b/changelog.d/17531.misc new file mode 100644 index 0000000000..25b7b36a72 --- /dev/null +++ b/changelog.d/17531.misc @@ -0,0 +1 @@ +Fixup comment in sliding sync implementation. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 99fc7eab54..e6efa7a424 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -128,7 +128,7 @@ class Codes(str, Enum): # MSC2677 DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" - # MSC3575 we are telling the client they need to reset their sliding sync + # MSC3575 we are telling the client they need to expire their sliding sync # connection. UNKNOWN_POS = "M_UNKNOWN_POS" From eb62d120633195e3f6f35a25134fd65806609d79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Aug 2024 10:37:13 +0100 Subject: [PATCH 102/332] Bump regex from 1.10.5 to 1.10.6 (#17527) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d50ce87d17..fb2c074347 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", From 30e9f6e4697df720f2868ec86e9ef6493d5a144d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Aug 2024 10:37:54 +0100 Subject: [PATCH 103/332] Bump bytes from 1.6.1 to 1.7.1 (#17526) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb2c074347..ce5520436d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cfg-if" From 11db575218d2601384e05519a45d930f34d0b1ae Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Aug 2024 11:27:50 -0500 Subject: [PATCH 104/332] Sliding Sync: Use `stream_ordering` based timeline pagination for incremental sync (#17510) Use `stream_ordering` based `timeline` pagination for incremental `/sync` in Sliding Sync. Previously, we were always using a `topological_ordering` but we should only be using that for historical scenarios (initial `/sync`, newly joined, or haven't sent the room down the connection before). This is slightly different than what the [spec suggests](https://spec.matrix.org/v1.10/client-server-api/#syncing) > Events are ordered in this API according to the arrival time of the event on the homeserver. This can conflict with other APIs which order events based on their partial ordering in the event graph. This can result in duplicate events being received (once per distinct API called). Clients SHOULD de-duplicate events based on the event ID when this happens. 
But we've had a [discussion below in this PR](https://github.com/element-hq/synapse/pull/17510#discussion_r1699105569) and this matches what Sync v2 already does and seems like it makes sense. Created a spec issue https://github.com/matrix-org/matrix-spec/issues/1917 to clarify this. Related issues: - https://github.com/matrix-org/matrix-spec/issues/1917 - https://github.com/matrix-org/matrix-spec/issues/852 - https://github.com/matrix-org/matrix-spec-proposals/pull/4033 --- changelog.d/17510.bugfix | 1 + docs/development/room-dag-concepts.md | 6 +- synapse/handlers/admin.py | 10 +- synapse/handlers/pagination.py | 32 +-- synapse/handlers/room.py | 2 +- synapse/handlers/sliding_sync.py | 40 +++- synapse/handlers/sync.py | 69 ++++-- synapse/storage/databases/main/stream.py | 277 ++++++++++++++++------- tests/storage/test_stream.py | 2 +- 9 files changed, 312 insertions(+), 127 deletions(-) create mode 100644 changelog.d/17510.bugfix diff --git a/changelog.d/17510.bugfix b/changelog.d/17510.bugfix new file mode 100644 index 0000000000..3170c284bd --- /dev/null +++ b/changelog.d/17510.bugfix @@ -0,0 +1 @@ +Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md index 76709487f8..35b667831c 100644 --- a/docs/development/room-dag-concepts.md +++ b/docs/development/room-dag-concepts.md @@ -21,8 +21,10 @@ incrementing integer, but backfilled events start with `stream_ordering=-1` and --- - - `/sync` returns things in the order they arrive at the server (`stream_ordering`). - - `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`. + - Incremental `/sync?since=xxx` returns things in the order they arrive at the server + (`stream_ordering`). + - Initial `/sync`, `/messages` (and `/backfill` in the federation API) return them in + the order determined by the event graph `(topological_ordering, stream_ordering)`. The general idea is that, if you're following a room in real-time (i.e. `/sync`), you probably want to see the messages as they arrive at your server, diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index ec35784c5f..b44e862493 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -197,8 +197,14 @@ class AdminHandler: # events that we have and then filtering, this isn't the most # efficient method perhaps but it does guarantee we get everything. while True: - events, _ = await self._store.paginate_room_events( - room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS + events, _ = ( + await self._store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_key, + to_key=to_key, + limit=100, + direction=Direction.FORWARDS, + ) ) if not events: break diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 872c85fbad..6fd7afa280 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -507,13 +507,15 @@ class PaginationHandler: # Initially fetch the events from the database. With any luck, we can return # these without blocking on backfill (handled below). 
- events, next_key = await self.store.paginate_room_events( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, + events, next_key = ( + await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, + ) ) if pagin_config.direction == Direction.BACKWARDS: @@ -582,13 +584,15 @@ class PaginationHandler: # If we did backfill something, refetch the events from the database to # catch anything new that might have been added since we last fetched. if did_backfill: - events, next_key = await self.store.paginate_room_events( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, + events, next_key = ( + await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, + ) ) else: # Otherwise, we can backfill in the background for eventual diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 262d9f4044..2c6e672ede 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1750,7 +1750,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): from_key=from_key, to_key=to_key, limit=limit or 10, - order="ASC", + direction=Direction.FORWARDS, ) events = list(room_events) diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 1db96ad41c..0fe66c8bd2 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -64,7 +64,10 @@ from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, Sentinel as StateSentinel, ) -from synapse.storage.databases.main.stream import CurrentStateDeltaMembership +from synapse.storage.databases.main.stream import ( + CurrentStateDeltaMembership, + PaginateFunction, +) from synapse.storage.roommember import MemberSummary from synapse.types import ( DeviceListUpdates, @@ -1863,10 +1866,13 @@ class SlidingSyncHandler: # We should return historical messages (before token range) in the # following cases because we want clients to be able to show a basic # screen of information: + # # - Initial sync (because no `from_token` to limit us anyway) # - When users `newly_joined` # - For an incremental sync where we haven't sent it down this # connection before + # + # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 from_bound = None initial = True if from_token and not room_membership_for_user_at_to_token.newly_joined: @@ -1927,7 +1933,36 @@ class SlidingSyncHandler: room_membership_for_user_at_to_token.event_pos.to_room_stream_token() ) - timeline_events, new_room_key = await self.store.paginate_room_events( + # For initial `/sync` (and other historical scenarios mentioned above), we + # want to view a historical section of the timeline; to fetch events by + # `topological_ordering` (best representation of the room DAG as others were + # seeing it at the time). This also aligns with the order that `/messages` + # returns events in. 
+ # + # For incremental `/sync`, we want to get all updates for rooms since + # the last `/sync` (regardless if those updates arrived late or happened + # a while ago in the past); to fetch events by `stream_ordering` (in the + # order they were received by the server). + # + # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 + # + # FIXME: Using workaround for mypy, + # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and + # https://github.com/python/mypy/issues/17479 + paginate_room_events_by_topological_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_topological_ordering + ) + paginate_room_events_by_stream_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_stream_ordering + ) + pagination_method: PaginateFunction = ( + # Use `topographical_ordering` for historical events + paginate_room_events_by_topological_ordering + if from_bound is None + # Use `stream_ordering` for updates + else paginate_room_events_by_stream_ordering + ) + timeline_events, new_room_key = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -1938,7 +1973,6 @@ class SlidingSyncHandler: # We add one so we can determine if there are enough events to saturate # the limit or not (see `limited`) limit=room_sync_config.timeline_limit + 1, - event_filter=None, ) # We want to return the events in ascending order (the last event is the diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ede014180c..6af2eeb75f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -43,6 +43,7 @@ from prometheus_client import Counter from synapse.api.constants import ( AccountDataTypes, + Direction, EventContentFields, EventTypes, JoinRules, @@ -64,6 +65,7 @@ from synapse.logging.opentracing import ( ) from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.databases.main.stream import PaginateFunction from synapse.storage.roommember import MemberSummary from synapse.types import ( DeviceListUpdates, @@ -879,22 +881,49 @@ class SyncHandler: since_key = since_token.room_key while limited and len(recents) < timeline_limit and max_repeat: - # If we have a since_key then we are trying to get any events - # that have happened since `since_key` up to `end_key`, so we - # can just use `get_room_events_stream_for_room`. - # Otherwise, we want to return the last N events in the room - # in topological ordering. - if since_key: - events, end_key = await self.store.get_room_events_stream_for_room( - room_id, - limit=load_limit + 1, - from_key=since_key, - to_key=end_key, - ) - else: - events, end_key = await self.store.get_recent_events_for_room( - room_id, limit=load_limit + 1, end_token=end_key - ) + # For initial `/sync`, we want to view a historical section of the + # timeline; to fetch events by `topological_ordering` (best + # representation of the room DAG as others were seeing it at the time). + # This also aligns with the order that `/messages` returns events in. + # + # For incremental `/sync`, we want to get all updates for rooms since + # the last `/sync` (regardless if those updates arrived late or happened + # a while ago in the past); to fetch events by `stream_ordering` (in the + # order they were received by the server). 
+ # + # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 + # + # FIXME: Using workaround for mypy, + # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and + # https://github.com/python/mypy/issues/17479 + paginate_room_events_by_topological_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_topological_ordering + ) + paginate_room_events_by_stream_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_stream_ordering + ) + pagination_method: PaginateFunction = ( + # Use `topographical_ordering` for historical events + paginate_room_events_by_topological_ordering + if since_key is None + # Use `stream_ordering` for updates + else paginate_room_events_by_stream_ordering + ) + events, end_key = await pagination_method( + room_id=room_id, + # The bounds are reversed so we can paginate backwards + # (from newer to older events) starting at to_bound. + # This ensures we fill the `limit` with the newest events first, + from_key=end_key, + to_key=since_key, + direction=Direction.BACKWARDS, + # We add one so we can determine if there are enough events to saturate + # the limit or not (see `limited`) + limit=load_limit + 1, + ) + # We want to return the events in ascending order (the last event is the + # most recent). + events.reverse() log_kv({"loaded_recents": len(events)}) @@ -2641,9 +2670,10 @@ class SyncHandler: # a "gap" in the timeline, as described by the spec for /sync. room_to_events = await self.store.get_room_events_stream_for_rooms( room_ids=sync_result_builder.joined_room_ids, - from_key=since_token.room_key, - to_key=now_token.room_key, + from_key=now_token.room_key, + to_key=since_token.room_key, limit=timeline_limit + 1, + direction=Direction.BACKWARDS, ) # We loop through all room ids, even if there are no new events, in case @@ -2654,6 +2684,9 @@ class SyncHandler: newly_joined = room_id in newly_joined_rooms if room_entry: events, start_key = room_entry + # We want to return the events in ascending order (the last event is the + # most recent). + events.reverse() prev_batch_token = now_token.copy_and_replace( StreamKeyType.ROOM, start_key diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 95775e3804..4989c960a6 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -51,6 +51,7 @@ from typing import ( Iterable, List, Optional, + Protocol, Set, Tuple, cast, @@ -59,7 +60,7 @@ from typing import ( import attr from immutabledict import immutabledict -from typing_extensions import Literal +from typing_extensions import Literal, assert_never from twisted.internet import defer @@ -97,6 +98,18 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" +class PaginateFunction(Protocol): + async def __call__( + self, + *, + room_id: str, + from_key: RoomStreamToken, + to_key: Optional[RoomStreamToken] = None, + direction: Direction = Direction.BACKWARDS, + limit: int = 0, + ) -> Tuple[List[EventBase], RoomStreamToken]: ... + + # Used as return values for pagination APIs @attr.s(slots=True, frozen=True, auto_attribs=True) class _EventDictReturn: @@ -280,7 +293,7 @@ def generate_pagination_bounds( def generate_next_token( - direction: Direction, last_topo_ordering: int, last_stream_ordering: int + direction: Direction, last_topo_ordering: Optional[int], last_stream_ordering: int ) -> RoomStreamToken: """ Generate the next room stream token based on the currently returned data. 
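Both call sites above carry a FIXME pointing at mypy issues 10740 and 17479: mypy cannot infer a usable type for a conditional expression over two differently-bound methods, so the resulting callable can no longer be invoked with keyword arguments. The new `PaginateFunction` protocol is the workaround. A minimal, self-contained sketch of the pattern, with a hypothetical store and plain `int` tokens standing in for `RoomStreamToken`s (not Synapse's actual classes):

```python
import asyncio
from typing import List, Optional, Protocol, Tuple


class PaginateFunction(Protocol):
    # Matches the keyword-only calling convention shared by both
    # datastore pagination methods.
    async def __call__(
        self,
        *,
        room_id: str,
        from_key: int,
        to_key: Optional[int] = None,
        limit: int = 0,
    ) -> Tuple[List[str], int]: ...


class FakeStore:
    # Hypothetical stand-ins for the two datastore methods; the real ones
    # take and return `RoomStreamToken`s and `EventBase`s.
    async def by_topological_ordering(
        self, *, room_id: str, from_key: int, to_key: Optional[int] = None, limit: int = 0
    ) -> Tuple[List[str], int]:
        return ([f"historical events for {room_id}"], from_key)

    async def by_stream_ordering(
        self, *, room_id: str, from_key: int, to_key: Optional[int] = None, limit: int = 0
    ) -> Tuple[List[str], int]:
        return ([f"live updates for {room_id}"], from_key)


async def fetch_timeline(store: FakeStore, from_bound: Optional[int]) -> Tuple[List[str], int]:
    # Annotating both bound methods with the shared protocol lets mypy accept
    # a conditional expression over them; without the annotations, the
    # inferred callable type cannot be called with keyword arguments.
    by_topo: PaginateFunction = store.by_topological_ordering
    by_stream: PaginateFunction = store.by_stream_ordering
    pagination_method: PaginateFunction = by_topo if from_bound is None else by_stream
    return await pagination_method(room_id="!room:example.org", from_key=42, limit=10)


print(asyncio.run(fetch_timeline(FakeStore(), from_bound=None)))
```

Because `__call__` is declared keyword-only, either bound method is structurally compatible with the protocol, so the ternary type-checks while the call sites stay keyword-based.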
@@ -447,7 +460,6 @@ def _filter_results_by_stream( The `instance_name` arg is optional to handle historic rows, and is interpreted as if it was "master". """ - if instance_name is None: instance_name = "master" @@ -660,33 +672,43 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): async def get_room_events_stream_for_rooms( self, + *, room_ids: Collection[str], from_key: RoomStreamToken, - to_key: RoomStreamToken, + to_key: Optional[RoomStreamToken] = None, + direction: Direction = Direction.BACKWARDS, limit: int = 0, - order: str = "DESC", ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]: """Get new room events in stream ordering since `from_key`. Args: room_ids - from_key: Token from which no events are returned before - to_key: Token from which no events are returned after. (This - is typically the current stream token) + from_key: The token to stream from (starting point and heading in the given + direction) + to_key: The token representing the end stream position (end point) limit: Maximum number of events to return - order: Either "DESC" or "ASC". Determines which events are - returned when the result is limited. If "DESC" then the most - recent `limit` events are returned, otherwise returns the - oldest `limit` events. + direction: Indicates whether we are paginating forwards or backwards + from `from_key`. Returns: A map from room id to a tuple containing: - list of recent events in the room - stream ordering key for the start of the chunk of events returned. + + When Direction.FORWARDS: from_key < x <= to_key, (ascending order) + When Direction.BACKWARDS: from_key >= x > to_key, (descending order) """ - room_ids = self._events_stream_cache.get_entities_changed( - room_ids, from_key.stream - ) + if direction == Direction.FORWARDS: + room_ids = self._events_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) + elif direction == Direction.BACKWARDS: + if to_key is not None: + room_ids = self._events_stream_cache.get_entities_changed( + room_ids, to_key.stream + ) + else: + assert_never(direction) if not room_ids: return {} @@ -698,12 +720,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): defer.gatherResults( [ run_in_background( - self.get_room_events_stream_for_room, - room_id, - from_key, - to_key, - limit, - order=order, + self.paginate_room_events_by_stream_ordering, + room_id=room_id, + from_key=from_key, + to_key=to_key, + direction=direction, + limit=limit, ) for room_id in rm_ids ], @@ -727,69 +749,122 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if self._events_stream_cache.has_entity_changed(room_id, from_id) } - async def get_room_events_stream_for_room( + async def paginate_room_events_by_stream_ordering( self, + *, room_id: str, from_key: RoomStreamToken, - to_key: RoomStreamToken, + to_key: Optional[RoomStreamToken] = None, + direction: Direction = Direction.BACKWARDS, limit: int = 0, - order: str = "DESC", ) -> Tuple[List[EventBase], RoomStreamToken]: - """Get new room events in stream ordering since `from_key`. + """ + Paginate events by `stream_ordering` in the room from the `from_key` in the + given `direction` to the `to_key` or `limit`. Args: room_id - from_key: Token from which no events are returned before - to_key: Token from which no events are returned after. 
(This - is typically the current stream token) + from_key: The token to stream from (starting point and heading in the given + direction) + to_key: The token representing the end stream position (end point) + direction: Indicates whether we are paginating forwards or backwards + from `from_key`. limit: Maximum number of events to return - order: Either "DESC" or "ASC". Determines which events are - returned when the result is limited. If "DESC" then the most - recent `limit` events are returned, otherwise returns the - oldest `limit` events. Returns: - The list of events (in ascending stream order) and the token from the start - of the chunk of events returned. - """ - if from_key == to_key: - return [], from_key + The results as a list of events and a token that points to the end + of the result set. If no events are returned then the end of the + stream has been reached (i.e. there are no events between `from_key` + and `to_key`). - has_changed = self._events_stream_cache.has_entity_changed( - room_id, from_key.stream - ) + When Direction.FORWARDS: from_key < x <= to_key, (ascending order) + When Direction.BACKWARDS: from_key >= x > to_key, (descending order) + """ + + # FIXME: When going forwards, we should enforce that the `to_key` is not `None` + # because we always need an upper bound when querying the events stream (as + # otherwise we'll potentially pick up events that are not fully persisted). + + # We should only be working with `stream_ordering` tokens here + assert from_key is None or from_key.topological is None + assert to_key is None or to_key.topological is None + + # We can bail early if we're looking forwards, and our `to_key` is already + # before our `from_key`. + if ( + direction == Direction.FORWARDS + and to_key is not None + and to_key.is_before_or_eq(from_key) + ): + # Token selection matches what we do below if there are no rows + return [], to_key if to_key else from_key + # Or vice-versa, if we're looking backwards and our `from_key` is already before + # our `to_key`. + elif ( + direction == Direction.BACKWARDS + and to_key is not None + and from_key.is_before_or_eq(to_key) + ): + # Token selection matches what we do below if there are no rows + return [], to_key if to_key else from_key + + # We can do a quick sanity check to see if any events have been sent in the room + # since the earlier token. 
+ has_changed = True + if direction == Direction.FORWARDS: + has_changed = self._events_stream_cache.has_entity_changed( + room_id, from_key.stream + ) + elif direction == Direction.BACKWARDS: + if to_key is not None: + has_changed = self._events_stream_cache.has_entity_changed( + room_id, to_key.stream + ) + else: + assert_never(direction) if not has_changed: - return [], from_key + # Token selection matches what we do below if there are no rows + return [], to_key if to_key else from_key + + order, from_bound, to_bound = generate_pagination_bounds( + direction, from_key, to_key + ) + + bounds = generate_pagination_where_clause( + direction=direction, + # The empty string will shortcut downstream code to only use the + # `stream_ordering` column + column_names=("", "stream_ordering"), + from_token=from_bound, + to_token=to_bound, + engine=self.database_engine, + ) def f(txn: LoggingTransaction) -> List[_EventDictReturn]: - # To handle tokens with a non-empty instance_map we fetch more - # results than necessary and then filter down - min_from_id = from_key.stream - max_to_id = to_key.get_max_stream_pos() - - sql = """ - SELECT event_id, instance_name, topological_ordering, stream_ordering + sql = f""" + SELECT event_id, instance_name, stream_ordering FROM events WHERE room_id = ? AND not outlier - AND stream_ordering > ? AND stream_ordering <= ? - ORDER BY stream_ordering %s LIMIT ? - """ % ( - order, - ) - txn.execute(sql, (room_id, min_from_id, max_to_id, 2 * limit)) + AND {bounds} + ORDER BY stream_ordering {order} LIMIT ? + """ + txn.execute(sql, (room_id, 2 * limit)) rows = [ _EventDictReturn(event_id, None, stream_ordering) - for event_id, instance_name, topological_ordering, stream_ordering in txn - if _filter_results( - from_key, - to_key, - instance_name, - topological_ordering, - stream_ordering, + for event_id, instance_name, stream_ordering in txn + if _filter_results_by_stream( + lower_token=( + to_key if direction == Direction.BACKWARDS else from_key + ), + upper_token=( + from_key if direction == Direction.BACKWARDS else to_key + ), + instance_name=instance_name, + stream_ordering=stream_ordering, ) ][:limit] return rows @@ -800,17 +875,18 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [r.event_id for r in rows], get_prev_content=True ) - if order.lower() == "desc": - ret.reverse() - if rows: - key = RoomStreamToken(stream=min(r.stream_ordering for r in rows)) + next_key = generate_next_token( + direction=direction, + last_topo_ordering=None, + last_stream_ordering=rows[-1].stream_ordering, + ) else: - # Assume we didn't get anything because there was nothing to - # get. - key = from_key + # TODO (erikj): We should work out what to do here instead. 
(same as + # `_paginate_room_events_by_topological_ordering_txn(...)`) + next_key = to_key if to_key else from_key - return ret, key + return ret, next_key @trace async def get_current_state_delta_membership_changes_for_user( @@ -1118,7 +1194,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rows, token = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", - self._paginate_room_events_txn, + self._paginate_room_events_by_topological_ordering_txn, room_id, from_token=end_token, limit=limit, @@ -1624,7 +1700,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological=topological_ordering, stream=stream_ordering ) - rows, start_token = self._paginate_room_events_txn( + rows, start_token = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, before_token, @@ -1634,7 +1710,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events_before = [r.event_id for r in rows] - rows, end_token = self._paginate_room_events_txn( + rows, end_token = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, after_token, @@ -1797,14 +1873,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def has_room_changed_since(self, room_id: str, stream_id: int) -> bool: return self._events_stream_cache.has_entity_changed(room_id, stream_id) - def _paginate_room_events_txn( + def _paginate_room_events_by_topological_ordering_txn( self, txn: LoggingTransaction, room_id: str, from_token: RoomStreamToken, to_token: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, - limit: int = -1, + limit: int = 0, event_filter: Optional[Filter] = None, ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: """Returns list of events before or after a given token. @@ -1826,6 +1902,24 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): been reached (i.e. there are no events between `from_token` and `to_token`), or `limit` is zero. """ + # We can bail early if we're looking forwards, and our `to_key` is already + # before our `from_token`. + if ( + direction == Direction.FORWARDS + and to_token is not None + and to_token.is_before_or_eq(from_token) + ): + # Token selection matches what we do below if there are no rows + return [], to_token if to_token else from_token + # Or vice-versa, if we're looking backwards and our `from_token` is already before + # our `to_token`. + elif ( + direction == Direction.BACKWARDS + and to_token is not None + and from_token.is_before_or_eq(to_token) + ): + # Token selection matches what we do below if there are no rows + return [], to_token if to_token else from_token args: List[Any] = [room_id] @@ -1910,7 +2004,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): "bounds": bounds, "order": order, } - txn.execute(sql, args) # Filter the result set. @@ -1943,27 +2036,29 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @trace @tag_args - async def paginate_room_events( + async def paginate_room_events_by_topological_ordering( self, + *, room_id: str, from_key: RoomStreamToken, to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, - limit: int = -1, + limit: int = 0, event_filter: Optional[Filter] = None, ) -> Tuple[List[EventBase], RoomStreamToken]: - """Returns list of events before or after a given token. 
- - When Direction.FORWARDS: from_key < x <= to_key - When Direction.BACKWARDS: from_key >= x > to_key + """ + Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in + the room from the `from_key` in the given `direction` to the `to_key` or + `limit`. Args: room_id - from_key: The token used to stream from - to_key: A token which if given limits the results to only those before + from_key: The token to stream from (starting point and heading in the given + direction) + to_key: The token representing the end stream position (end point) direction: Indicates whether we are paginating forwards or backwards from `from_key`. - limit: The maximum number of events to return. + limit: Maximum number of events to return event_filter: If provided filters the events to those that match the filter. Returns: @@ -1971,8 +2066,18 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): of the result set. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). + + When Direction.FORWARDS: from_key < x <= to_key, (ascending order) + When Direction.BACKWARDS: from_key >= x > to_key, (descending order) """ + # FIXME: When going forwards, we should enforce that the `to_key` is not `None` + # because we always need an upper bound when querying the events stream (as + # otherwise we'll potentially pick up events that are not fully persisted). + + # We have these checks outside of the transaction function (txn) to save getting + # a DB connection and switching threads if we don't need to. + # # We can bail early if we're looking forwards, and our `to_key` is already # before our `from_key`. if ( @@ -1995,8 +2100,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return [], to_key if to_key else from_key rows, token = await self.db_pool.runInteraction( - "paginate_room_events", - self._paginate_room_events_txn, + "paginate_room_events_by_topological_ordering", + self._paginate_room_events_by_topological_ordering_txn, room_id, from_key, to_key, diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 9dea1af8ea..7b7590da76 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -148,7 +148,7 @@ class PaginationTestCase(HomeserverTestCase): """Make a request to /messages with a filter, returns the chunk of events.""" events, next_key = self.get_success( - self.hs.get_datastores().main.paginate_room_events( + self.hs.get_datastores().main.paginate_room_events_by_topological_ordering( room_id=self.room_id, from_key=self.from_token.room_key, to_key=None, From 44ac2aa3b69545e02c849276d979c117a2b42070 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Aug 2024 10:44:17 +0100 Subject: [PATCH 105/332] SSS: Implement PREVIOUSLY room tracking (#17535) Implement tracking of rooms that have had updates that have not been sent down to clients. Simplified Sliding Sync (SSS) --- changelog.d/17535.bugfix | 1 + synapse/handlers/sliding_sync.py | 68 +++++++++++++----- .../sliding_sync/test_connection_tracking.py | 72 ------------------- 3 files changed, 53 insertions(+), 88 deletions(-) create mode 100644 changelog.d/17535.bugfix diff --git a/changelog.d/17535.bugfix b/changelog.d/17535.bugfix new file mode 100644 index 0000000000..c5b5da0485 --- /dev/null +++ b/changelog.d/17535.bugfix @@ -0,0 +1 @@ +Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. 
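At the core of the fix is the connection store's per-room send status, driven by the `HaveSentRoomFlag` values (`NEVER`/`PREVIOUSLY`/`LIVE`) used earlier in the handler. A deliberately simplified sketch of that bookkeeping follows; the real store keys this state by user, connection ID and connection position, and also records the stream token at which a `PREVIOUSLY` room fell behind:

```python
import enum
from typing import Dict, Iterable


class HaveSentRoomFlag(enum.Enum):
    NEVER = 1        # never sent down this connection
    PREVIOUSLY = 2   # sent before, but has updates we did not send
    LIVE = 3         # fully caught up at the connection position


class ToyConnectionStore:
    def __init__(self) -> None:
        self._rooms: Dict[str, HaveSentRoomFlag] = {}

    def have_sent_room(self, room_id: str) -> HaveSentRoomFlag:
        return self._rooms.get(room_id, HaveSentRoomFlag.NEVER)

    def record_rooms(
        self, sent_room_ids: Iterable[str], unsent_room_ids: Iterable[str]
    ) -> None:
        # Rooms sent this request are now fully caught up.
        for room_id in sent_room_ids:
            self._rooms[room_id] = HaveSentRoomFlag.LIVE
        # Rooms with updates we deliberately left out are downgraded from
        # LIVE to PREVIOUSLY, so a later request that does include them
        # knows it must catch them up. NEVER rooms stay NEVER.
        for room_id in unsent_room_ids:
            if self._rooms.get(room_id) is HaveSentRoomFlag.LIVE:
                self._rooms[room_id] = HaveSentRoomFlag.PREVIOUSLY


store = ToyConnectionStore()
store.record_rooms(sent_room_ids=["!a"], unsent_room_ids=["!b"])  # !b stays NEVER
store.record_rooms(sent_room_ids=[], unsent_room_ids=["!a"])      # !a: LIVE -> PREVIOUSLY
assert store.have_sent_room("!a") is HaveSentRoomFlag.PREVIOUSLY
assert store.have_sent_room("!b") is HaveSentRoomFlag.NEVER
```

This is why the patch can delete the test harness hacks below: once `record_rooms` receives real `unsent_room_ids`, the `NEVER`/`PREVIOUSLY` statuses alone force missed rooms back down the connection.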
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 0fe66c8bd2..18a96843be 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -543,6 +543,9 @@ class SlidingSyncHandler: lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we can display and need to fetch more info about relevant_room_map: Dict[str, RoomSyncConfig] = {} + # The set of room IDs of all rooms that could appear in any list. These + # include rooms that are outside the list ranges. + all_rooms: Set[str] = set() if has_lists and sync_config.lists is not None: with start_active_span("assemble_sliding_window_lists"): sync_room_map = await self.filter_rooms_relevant_for_sync( @@ -561,11 +564,6 @@ class SlidingSyncHandler: to_token, ) - # Sort the list - sorted_room_info = await self.sort_rooms( - filtered_sync_room_map, to_token - ) - # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). partial_state_room_map = ( @@ -586,6 +584,23 @@ class SlidingSyncHandler: and StateValues.LAZY in membership_state_keys ) + if not lazy_loading: + # Exclude partially-stated rooms unless the `required_state` + # only has `["m.room.member", "$LAZY"]` for membership + # (lazy-loading room members). + filtered_sync_room_map = { + room_id: room + for room_id, room in filtered_sync_room_map.items() + if not partial_state_room_map.get(room_id) + } + + all_rooms.update(filtered_sync_room_map) + + # Sort the list + sorted_room_info = await self.sort_rooms( + filtered_sync_room_map, to_token + ) + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] if list_config.ranges: for range in list_config.ranges: @@ -603,15 +618,6 @@ class SlidingSyncHandler: if len(room_ids_in_list) >= max_num_rooms: break - # Exclude partially-stated rooms unless the `required_state` - # only has `["m.room.member", "$LAZY"]` for membership - # (lazy-loading room members). - if ( - partial_state_room_map.get(room_id) - and not lazy_loading - ): - continue - # Take the superset of the `RoomSyncConfig` for each room. # # Update our `relevant_room_map` with the room we're going @@ -664,6 +670,8 @@ class SlidingSyncHandler: if not room_membership_for_user_at_to_token: continue + all_rooms.add(room_id) + room_membership_for_user_map[room_id] = ( room_membership_for_user_at_to_token ) @@ -771,12 +779,40 @@ class SlidingSyncHandler: ) if has_lists or has_room_subscriptions: + # We now calculate if any rooms outside the range have had updates, + # which we are not sending down. + # + # We *must* record rooms that have had updates, but it is also fine + # to record rooms as having updates even if there might not actually + # be anything new for the user (e.g. due to event filters, events + # having happened after the user left, etc). + unsent_room_ids = [] + if from_token: + # The set of rooms that the client (may) care about, but aren't + # in any list range (or subscribed to). + missing_rooms = all_rooms - relevant_room_map.keys() + + # We now just go and try fetching any events in the above rooms + # to see if anything has happened since the `from_token`. + # + # TODO: Replace this with something faster. When we land the + # sliding sync tables that record the most recent event + # positions we can use that. 
+ missing_event_map_by_room = ( + await self.store.get_room_events_stream_for_rooms( + room_ids=missing_rooms, + from_key=to_token.room_key, + to_key=from_token.stream_token.room_key, + limit=1, + ) + ) + unsent_room_ids = list(missing_event_map_by_room) + connection_position = await self.connection_store.record_rooms( sync_config=sync_config, from_token=from_token, sent_room_ids=relevant_rooms_to_send_map.keys(), - # TODO: We need to calculate which rooms have had updates since the `from_token` but were not included in the `sent_room_ids` - unsent_room_ids=[], + unsent_room_ids=unsent_room_ids, ) elif from_token: connection_position = from_token.connection_position diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index 4d8866b30a..6863c32f7c 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -21,8 +21,6 @@ import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.types import SlidingSyncStreamToken -from synapse.types.handlers import SlidingSyncConfig from synapse.util import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -130,7 +128,6 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): self.helper.send(room_id1, "msg", tok=user1_tok) timeline_limit = 5 - conn_id = "conn_id" sync_body = { "lists": { "foo-list": { @@ -170,40 +167,6 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): response_body["rooms"].keys(), {room_id2}, response_body["rooms"] ) - # FIXME: This is a hack to record that the first room wasn't sent down - # sync, as we don't implement that currently. - sliding_sync_handler = self.hs.get_sliding_sync_handler() - requester = self.get_success( - self.hs.get_auth().get_user_by_access_token(user1_tok) - ) - sync_config = SlidingSyncConfig( - user=requester.user, - requester=requester, - conn_id=conn_id, - ) - - parsed_initial_from_token = self.get_success( - SlidingSyncStreamToken.from_string(self.store, initial_from_token) - ) - connection_position = self.get_success( - sliding_sync_handler.connection_store.record_rooms( - sync_config, - parsed_initial_from_token, - sent_room_ids=[], - unsent_room_ids=[room_id1], - ) - ) - - # FIXME: Now fix up `from_token` with new connect position above. - parsed_from_token = self.get_success( - SlidingSyncStreamToken.from_string(self.store, from_token) - ) - parsed_from_token = SlidingSyncStreamToken( - stream_token=parsed_from_token.stream_token, - connection_position=connection_position, - ) - from_token = self.get_success(parsed_from_token.to_string(self.store)) - # We now send another event to room1, so we should sync all the missing events. resp = self.helper.send(room_id1, "msg2", tok=user1_tok) expected_events.append(resp["event_id"]) @@ -238,7 +201,6 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): self.helper.send(room_id1, "msg", tok=user1_tok) - conn_id = "conn_id" sync_body = { "lists": { "foo-list": { @@ -279,40 +241,6 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): response_body["rooms"].keys(), {room_id2}, response_body["rooms"] ) - # FIXME: This is a hack to record that the first room wasn't sent down - # sync, as we don't implement that currently. 
- sliding_sync_handler = self.hs.get_sliding_sync_handler() - requester = self.get_success( - self.hs.get_auth().get_user_by_access_token(user1_tok) - ) - sync_config = SlidingSyncConfig( - user=requester.user, - requester=requester, - conn_id=conn_id, - ) - - parsed_initial_from_token = self.get_success( - SlidingSyncStreamToken.from_string(self.store, initial_from_token) - ) - connection_position = self.get_success( - sliding_sync_handler.connection_store.record_rooms( - sync_config, - parsed_initial_from_token, - sent_room_ids=[], - unsent_room_ids=[room_id1], - ) - ) - - # FIXME: Now fix up `from_token` with new connect position above. - parsed_from_token = self.get_success( - SlidingSyncStreamToken.from_string(self.store, from_token) - ) - parsed_from_token = SlidingSyncStreamToken( - stream_token=parsed_from_token.stream_token, - connection_position=connection_position, - ) - from_token = self.get_success(parsed_from_token.to_string(self.store)) - # We now send another event to room1, so we should sync all the missing state. self.helper.send(room_id1, "msg", tok=user1_tok) From 3ad38b644d1c344dfc0e2d60c924ecacebd7aa67 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:59:37 +0200 Subject: [PATCH 106/332] Replace deprecated `HTTPAdapter.get_connection` method with `get_connection_with_tls_context` (#17536) --- changelog.d/17536.misc | 1 + scripts-dev/federation_client.py | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 changelog.d/17536.misc diff --git a/changelog.d/17536.misc b/changelog.d/17536.misc new file mode 100644 index 0000000000..116ef0c36d --- /dev/null +++ b/changelog.d/17536.misc @@ -0,0 +1 @@ +Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. \ No newline at end of file diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 4c758e5424..fb879ef555 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -43,7 +43,7 @@ import argparse import base64 import json import sys -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict, Mapping, Optional, Tuple, Union from urllib import parse as urlparse import requests @@ -75,7 +75,7 @@ def encode_canonical_json(value: object) -> bytes: value, # Encode code-points outside of ASCII as UTF-8 rather than \u escapes ensure_ascii=False, - # Remove unecessary white space. + # Remove unnecessary white space. separators=(",", ":"), # Sort the keys of dictionaries. sort_keys=True, @@ -298,12 +298,23 @@ class MatrixConnectionAdapter(HTTPAdapter): return super().send(request, *args, **kwargs) - def get_connection( - self, url: str, proxies: Optional[Dict[str, str]] = None + def get_connection_with_tls_context( + self, + request: PreparedRequest, + verify: Optional[Union[bool, str]], + proxies: Optional[Mapping[str, str]] = None, + cert: Optional[Union[Tuple[str, str], str]] = None, ) -> HTTPConnectionPool: - # overrides the get_connection() method in the base class - parsed = urlparse.urlsplit(url) - (host, port, ssl_server_name) = self._lookup(parsed.netloc) + # overrides the get_connection_with_tls_context() method in the base class + parsed = urlparse.urlsplit(request.url) + + # Extract the server name from the request URL, and ensure it's a str. 
+ hostname = parsed.netloc + if isinstance(hostname, bytes): + hostname = hostname.decode("utf-8") + assert isinstance(hostname, str) + + (host, port, ssl_server_name) = self._lookup(hostname) print( f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr ) From f31360e34b989059e79d9af6fb8d776f92474149 Mon Sep 17 00:00:00 2001 From: devonh Date: Thu, 8 Aug 2024 14:35:46 +0000 Subject: [PATCH 107/332] Start handlers for new media endpoints when media resource configured (#17483) This is in response to issue #17473. Not all the necessary handlers to deal with media requests are started now when configuring synapse to use a media worker as per the [example config](https://element-hq.github.io/synapse/latest/workers.html#synapseappmedia_repository). The new media endpoints introduced with authenticated media fall under the `client` & `federation` handlers in synapse. This PR starts up handlers for the new media endpoints if a worker has been configured with only the `media` resource type. ### Pull Request Checklist * [X] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17483.bugfix | 1 + synapse/app/generic_worker.py | 15 ++ synapse/app/homeserver.py | 14 ++ .../federation/transport/server/__init__.py | 4 + .../federation/transport/server/federation.py | 2 - synapse/rest/__init__.py | 166 +++++++++++------- 6 files changed, 132 insertions(+), 70 deletions(-) create mode 100644 changelog.d/17483.bugfix diff --git a/changelog.d/17483.bugfix b/changelog.d/17483.bugfix new file mode 100644 index 0000000000..c97a802dbf --- /dev/null +++ b/changelog.d/17483.bugfix @@ -0,0 +1 @@ +Start handlers for new media endpoints when media resource configured. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 248622fa92..53f1859256 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -206,6 +206,21 @@ class GenericWorkerServer(HomeServer): "/_synapse/admin": admin_resource, } ) + + if "federation" not in res.names: + # Only load the federation media resource separately if federation + # resource is not specified since federation resource includes media + # resource. + resources[FEDERATION_PREFIX] = TransportLayerServer( + self, servlet_groups=["media"] + ) + if "client" not in res.names: + # Only load the client media resource separately if client + # resource is not specified since client resource includes media + # resource. 
+ resources[CLIENT_API_PREFIX] = ClientRestResource( + self, servlet_groups=["media"] + ) else: logger.warning( "A 'media' listener is configured but the media" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e114ab7ec4..2a824e8457 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -101,6 +101,12 @@ class SynapseHomeServer(HomeServer): # Skip loading openid resource if federation is defined # since federation resource will include openid continue + if name == "media" and ( + "federation" in res.names or "client" in res.names + ): + # Skip loading media resource if federation or client are defined + # since federation & client resources will include media + continue if name == "health": # Skip loading, health resource is always included continue @@ -231,6 +237,14 @@ class SynapseHomeServer(HomeServer): "'media' resource conflicts with enable_media_repo=False" ) + if name == "media": + resources[FEDERATION_PREFIX] = TransportLayerServer( + self, servlet_groups=["media"] + ) + resources[CLIENT_API_PREFIX] = ClientRestResource( + self, servlet_groups=["media"] + ) + if name in ["keys", "federation"]: resources[SERVER_KEY_PREFIX] = KeyResource(self) diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 72599bb204..43102567db 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -271,6 +271,10 @@ SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), "openid": (OpenIdUserInfo,), + "media": ( + FederationMediaDownloadServlet, + FederationMediaThumbnailServlet, + ), } diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index b075a86f68..20f87c885e 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -912,6 +912,4 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationV1SendKnockServlet, FederationMakeKnockServlet, FederationAccountStatusServlet, - FederationMediaDownloadServlet, - FederationMediaThumbnailServlet, ) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 1aa9ea3877..c5cdc36955 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -18,7 +18,8 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Callable +import logging +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple from synapse.http.server import HttpServer, JsonResource from synapse.rest import admin @@ -67,11 +68,64 @@ from synapse.rest.client import ( voip, ) +logger = logging.getLogger(__name__) + if TYPE_CHECKING: from synapse.server import HomeServer RegisterServletsFunc = Callable[["HomeServer", HttpServer], None] +CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] 
= ( + versions.register_servlets, + initial_sync.register_servlets, + room.register_deprecated_servlets, + events.register_servlets, + room.register_servlets, + login.register_servlets, + profile.register_servlets, + presence.register_servlets, + directory.register_servlets, + voip.register_servlets, + pusher.register_servlets, + push_rule.register_servlets, + logout.register_servlets, + sync.register_servlets, + filter.register_servlets, + account.register_servlets, + register.register_servlets, + auth.register_servlets, + receipts.register_servlets, + read_marker.register_servlets, + room_keys.register_servlets, + keys.register_servlets, + tokenrefresh.register_servlets, + tags.register_servlets, + account_data.register_servlets, + reporting.register_servlets, + openid.register_servlets, + notifications.register_servlets, + devices.register_servlets, + thirdparty.register_servlets, + sendtodevice.register_servlets, + user_directory.register_servlets, + room_upgrade_rest_servlet.register_servlets, + capabilities.register_servlets, + account_validity.register_servlets, + relations.register_servlets, + password_policy.register_servlets, + knock.register_servlets, + appservice_ping.register_servlets, + admin.register_servlets_for_client_rest_resource, + mutual_rooms.register_servlets, + login_token_request.register_servlets, + rendezvous.register_servlets, + auth_issuer.register_servlets, +) + +SERVLET_GROUPS: Dict[str, Iterable[RegisterServletsFunc]] = { + "client": CLIENT_SERVLET_FUNCTIONS, +} + class ClientRestResource(JsonResource): """Matrix Client API REST resource. @@ -83,80 +137,56 @@ class ClientRestResource(JsonResource): * etc """ - def __init__(self, hs: "HomeServer"): + def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None): JsonResource.__init__(self, hs, canonical_json=False) - self.register_servlets(self, hs) + if hs.config.media.can_load_media_repo: + # This import is here to prevent a circular import failure + from synapse.rest.client import media + + SERVLET_GROUPS["media"] = (media.register_servlets,) + self.register_servlets(self, hs, servlet_groups) @staticmethod - def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: + def register_servlets( + client_resource: HttpServer, + hs: "HomeServer", + servlet_groups: Optional[Iterable[str]] = None, + ) -> None: # Some servlets are only registered on the main process (and not worker # processes). is_main_process = hs.config.worker.worker_app is None - versions.register_servlets(hs, client_resource) + if not servlet_groups: + servlet_groups = SERVLET_GROUPS.keys() - # Deprecated in r0 - initial_sync.register_servlets(hs, client_resource) - room.register_deprecated_servlets(hs, client_resource) + for servlet_group in servlet_groups: + # Fail on unknown servlet groups. 
+ if servlet_group not in SERVLET_GROUPS: + if servlet_group == "media": + logger.warn( + "media.can_load_media_repo needs to be configured for the media servlet to be available" + ) + raise RuntimeError( + f"Attempting to register unknown client servlet: '{servlet_group}'" + ) - # Partially deprecated in r0 - events.register_servlets(hs, client_resource) + for servletfunc in SERVLET_GROUPS[servlet_group]: + if not is_main_process and servletfunc in [ + pusher.register_servlets, + logout.register_servlets, + auth.register_servlets, + tokenrefresh.register_servlets, + reporting.register_servlets, + openid.register_servlets, + thirdparty.register_servlets, + room_upgrade_rest_servlet.register_servlets, + account_validity.register_servlets, + admin.register_servlets_for_client_rest_resource, + mutual_rooms.register_servlets, + login_token_request.register_servlets, + rendezvous.register_servlets, + auth_issuer.register_servlets, + ]: + continue - room.register_servlets(hs, client_resource) - login.register_servlets(hs, client_resource) - profile.register_servlets(hs, client_resource) - presence.register_servlets(hs, client_resource) - directory.register_servlets(hs, client_resource) - voip.register_servlets(hs, client_resource) - if is_main_process: - pusher.register_servlets(hs, client_resource) - push_rule.register_servlets(hs, client_resource) - if is_main_process: - logout.register_servlets(hs, client_resource) - sync.register_servlets(hs, client_resource) - filter.register_servlets(hs, client_resource) - account.register_servlets(hs, client_resource) - register.register_servlets(hs, client_resource) - if is_main_process: - auth.register_servlets(hs, client_resource) - receipts.register_servlets(hs, client_resource) - read_marker.register_servlets(hs, client_resource) - room_keys.register_servlets(hs, client_resource) - keys.register_servlets(hs, client_resource) - if is_main_process: - tokenrefresh.register_servlets(hs, client_resource) - tags.register_servlets(hs, client_resource) - account_data.register_servlets(hs, client_resource) - if is_main_process: - reporting.register_servlets(hs, client_resource) - openid.register_servlets(hs, client_resource) - notifications.register_servlets(hs, client_resource) - devices.register_servlets(hs, client_resource) - if is_main_process: - thirdparty.register_servlets(hs, client_resource) - sendtodevice.register_servlets(hs, client_resource) - user_directory.register_servlets(hs, client_resource) - if is_main_process: - room_upgrade_rest_servlet.register_servlets(hs, client_resource) - capabilities.register_servlets(hs, client_resource) - if is_main_process: - account_validity.register_servlets(hs, client_resource) - relations.register_servlets(hs, client_resource) - password_policy.register_servlets(hs, client_resource) - knock.register_servlets(hs, client_resource) - appservice_ping.register_servlets(hs, client_resource) - if hs.config.media.can_load_media_repo: - from synapse.rest.client import media - - media.register_servlets(hs, client_resource) - - # moving to /_synapse/admin - if is_main_process: - admin.register_servlets_for_client_rest_resource(hs, client_resource) - - # unstable - if is_main_process: - mutual_rooms.register_servlets(hs, client_resource) - login_token_request.register_servlets(hs, client_resource) - rendezvous.register_servlets(hs, client_resource) - auth_issuer.register_servlets(hs, client_resource) + servletfunc(hs, client_resource) From 70b0e386032ed2f3ecf25fcf9a4b6c31335ffdc4 Mon Sep 17 00:00:00 2001 From: Erik 
Johnston Date: Fri, 9 Aug 2024 11:59:44 +0100 Subject: [PATCH 108/332] Fix performance of device lists in `/key/changes` and sliding sync (#17537) We do this by reusing the code from sync v2. Reviewable commit-by-commit. The function `get_user_ids_changed` has been rewritten entirely, so I would recommend not looking at the diff. --- changelog.d/17537.misc | 1 + synapse/handlers/device.py | 299 +++++++++++------- synapse/handlers/sync.py | 111 ++----- .../storage/databases/main/state_deltas.py | 5 + 4 files changed, 213 insertions(+), 203 deletions(-) create mode 100644 changelog.d/17537.misc diff --git a/changelog.d/17537.misc b/changelog.d/17537.misc new file mode 100644 index 0000000000..861b241dcd --- /dev/null +++ b/changelog.d/17537.misc @@ -0,0 +1 @@ +Fix performance of device lists in `/key/changes` and sliding sync. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4fc6fcd7ae..ce26c91a7b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -20,10 +20,20 @@ # # import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + AbstractSet, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, +) from synapse.api import errors -from synapse.api.constants import EduTypes, EventTypes +from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import ( Codes, FederationDeniedError, @@ -38,6 +48,7 @@ from synapse.metrics.background_process_metrics import ( wrap_as_background_process, ) from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import ( DeviceListUpdates, JsonDict, @@ -222,129 +233,115 @@ class DeviceWorkerHandler: set_tag("user_id", user_id) set_tag("from_token", str(from_token)) - now_room_key = self.store.get_room_max_token() - room_ids = await self.store.get_rooms_for_user(user_id) + now_token = self._event_sources.get_current_token() - changed = await self.get_device_changes_in_shared_rooms( - user_id, room_ids, from_token + # We need to work out all the different membership changes for the user + # and user they share a room with, to pass to + # `generate_sync_entry_for_device_list`. See its docstring for details + # on the data required. + + joined_room_ids = await self.store.get_rooms_for_user(user_id) + + # Get the set of rooms that the user has joined/left + membership_changes = ( + await self.store.get_current_state_delta_membership_changes_for_user( + user_id, from_key=from_token.room_key, to_key=now_token.room_key + ) ) - # Then work out if any users have since joined - rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key) + # Check for newly joined or left rooms. We need to make sure that we add + # to newly joined in the case membership goes from join -> leave -> join + # again. + newly_joined_rooms: Set[str] = set() + newly_left_rooms: Set[str] = set() + for change in membership_changes: + # We check for changes in "joinedness", i.e. if the membership has + # changed to or from JOIN. 
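+            # Because the deltas are processed in order, later transitions
+            # undo the bookkeeping of earlier ones, so a join -> leave ->
+            # join sequence correctly nets out as "newly joined".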
+ if change.membership == Membership.JOIN: + if change.prev_membership != Membership.JOIN: + newly_joined_rooms.add(change.room_id) + newly_left_rooms.discard(change.room_id) + elif change.prev_membership == Membership.JOIN: + newly_joined_rooms.discard(change.room_id) + newly_left_rooms.add(change.room_id) - member_events = await self.store.get_membership_changes_for_user( - user_id, from_token.room_key, now_room_key + # We now work out if any other users have since joined or left the rooms + # the user is currently in. First we filter out rooms that we know + # haven't changed recently. + rooms_changed = self.store.get_rooms_that_changed( + joined_room_ids, from_token.room_key ) - rooms_changed.update(event.room_id for event in member_events) - stream_ordering = from_token.room_key.stream - - possibly_changed = set(changed) - possibly_left = set() + # List of membership changes per room + room_to_deltas: Dict[str, List[StateDelta]] = {} + # The set of event IDs of membership events (so we can fetch their + # associated membership). + memberships_to_fetch: Set[str] = set() for room_id in rooms_changed: - # Check if the forward extremities have changed. If not then we know - # the current state won't have changed, and so we can skip this room. - try: - if not await self.store.have_room_forward_extremities_changed_since( - room_id, stream_ordering - ): - continue - except errors.StoreError: - pass - - current_state_ids = await self._state_storage.get_current_state_ids( - room_id, await_full_state=False + # TODO: Only pull out membership events? + state_changes = await self.store.get_current_state_deltas_for_room( + room_id, from_token=from_token.room_key, to_token=now_token.room_key ) - - # The user may have left the room - # TODO: Check if they actually did or if we were just invited. - if room_id not in room_ids: - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_left.add(state_key) - continue - - # Fetch the current state at the time. - try: - event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering( - room_id, stream_ordering=stream_ordering - ) - except errors.StoreError: - # we have purged the stream_ordering index since the stream - # ordering: treat it the same as a new room - event_ids = [] - - # special-case for an empty prev state: include all members - # in the changed list - if not event_ids: - log_kv( - {"event": "encountered empty previous state", "room_id": room_id} - ) - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_changed.add(state_key) - continue - - current_member_id = current_state_ids.get((EventTypes.Member, user_id)) - if not current_member_id: - continue - - # mapping from event_id -> state_dict - prev_state_ids = await self._state_storage.get_state_ids_for_events( - event_ids, - await_full_state=False, - ) - - # Check if we've joined the room? If so we just blindly add all the users to - # the "possibly changed" users. - for state_dict in prev_state_ids.values(): - member_event = state_dict.get((EventTypes.Member, user_id), None) - if not member_event or member_event != current_member_id: - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_changed.add(state_key) - break - - # If there has been any change in membership, include them in the - # possibly changed list. We'll check if they are joined below, - # and we're not toooo worried about spuriously adding users. 
- for key, event_id in current_state_ids.items(): - etype, state_key = key - if etype != EventTypes.Member: + for delta in state_changes: + if delta.event_type != EventTypes.Member: continue - # check if this member has changed since any of the extremities - # at the stream_ordering, and add them to the list if so. - for state_dict in prev_state_ids.values(): - prev_event_id = state_dict.get(key, None) - if not prev_event_id or prev_event_id != event_id: - if state_key != user_id: - possibly_changed.add(state_key) - break + room_to_deltas.setdefault(room_id, []).append(delta) + if delta.event_id: + memberships_to_fetch.add(delta.event_id) + if delta.prev_event_id: + memberships_to_fetch.add(delta.prev_event_id) - if possibly_changed or possibly_left: - possibly_joined = possibly_changed - possibly_left = possibly_changed | possibly_left + # Fetch all the memberships for the membership events + event_id_to_memberships = await self.store.get_membership_from_event_ids( + memberships_to_fetch + ) - # Double check if we still share rooms with the given user. - users_rooms = await self.store.get_rooms_for_users(possibly_left) - for changed_user_id, entries in users_rooms.items(): - if any(rid in room_ids for rid in entries): - possibly_left.discard(changed_user_id) - else: - possibly_joined.discard(changed_user_id) + joined_invited_knocked = ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + ) - else: - possibly_joined = set() - possibly_left = set() + # We now want to find any user that have newly joined/invited/knocked, + # or newly left, similarly to above. + newly_joined_or_invited_or_knocked_users: Set[str] = set() + newly_left_users: Set[str] = set() + for _, deltas in room_to_deltas.items(): + for delta in deltas: + # Get the prev/new memberships for the delta + new_membership = None + prev_membership = None + if delta.event_id: + m = event_id_to_memberships.get(delta.event_id) + if m is not None: + new_membership = m.membership + if delta.prev_event_id: + m = event_id_to_memberships.get(delta.prev_event_id) + if m is not None: + prev_membership = m.membership - device_list_updates = DeviceListUpdates( - changed=possibly_joined, - left=possibly_left, + # Check if a user has newly joined/invited/knocked, or left. + if new_membership in joined_invited_knocked: + if prev_membership not in joined_invited_knocked: + newly_joined_or_invited_or_knocked_users.add(delta.state_key) + newly_left_users.discard(delta.state_key) + elif prev_membership in joined_invited_knocked: + newly_joined_or_invited_or_knocked_users.discard(delta.state_key) + newly_left_users.add(delta.state_key) + + # Now we actually calculate the device list entry with the information + # calculated above. 
+ device_list_updates = await self.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=from_token, + now_token=now_token, + joined_room_ids=joined_room_ids, + newly_joined_rooms=newly_joined_rooms, + newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, + newly_left_rooms=newly_left_rooms, + newly_left_users=newly_left_users, ) log_kv( @@ -356,6 +353,88 @@ class DeviceWorkerHandler: return device_list_updates + @measure_func("_generate_sync_entry_for_device_list") + async def generate_sync_entry_for_device_list( + self, + user_id: str, + since_token: StreamToken, + now_token: StreamToken, + joined_room_ids: AbstractSet[str], + newly_joined_rooms: AbstractSet[str], + newly_joined_or_invited_or_knocked_users: AbstractSet[str], + newly_left_rooms: AbstractSet[str], + newly_left_users: AbstractSet[str], + ) -> DeviceListUpdates: + """Generate the DeviceListUpdates section of sync + + Args: + sync_result_builder + newly_joined_rooms: Set of rooms user has joined since previous sync + newly_joined_or_invited_or_knocked_users: Set of users that have joined, + been invited to a room or are knocking on a room since + previous sync. + newly_left_rooms: Set of rooms user has left since previous sync + newly_left_users: Set of users that have left a room we're in since + previous sync + """ + # Take a copy since these fields will be mutated later. + newly_joined_or_invited_or_knocked_users = set( + newly_joined_or_invited_or_knocked_users + ) + newly_left_users = set(newly_left_users) + + # We want to figure out what user IDs the client should refetch + # device keys for, and which users we aren't going to track changes + # for anymore. + # + # For the first step we check: + # a. if any users we share a room with have updated their devices, + # and + # b. we also check if we've joined any new rooms, or if a user has + # joined a room we're in. + # + # For the second step we just find any users we no longer share a + # room with by looking at all users that have left a room plus users + # that were in a room we've left. + + users_that_have_changed = set() + + # Step 1a, check for changes in devices of users we share a room + # with + users_that_have_changed = await self.get_device_changes_in_shared_rooms( + user_id, + joined_room_ids, + from_token=since_token, + now_token=now_token, + ) + + # Step 1b, check for newly joined rooms + for room_id in newly_joined_rooms: + joined_users = await self.store.get_users_in_room(room_id) + newly_joined_or_invited_or_knocked_users.update(joined_users) + + # TODO: Check that these users are actually new, i.e. either they + # weren't in the previous sync *or* they left and rejoined. + users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) + + user_signatures_changed = await self.store.get_users_whose_signatures_changed( + user_id, since_token.device_list_key + ) + users_that_have_changed.update(user_signatures_changed) + + # Now find users that we no longer track + for room_id in newly_left_rooms: + left_users = await self.store.get_users_in_room(room_id) + newly_left_users.update(left_users) + + # Remove any users that we still share a room with. 
+ left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) + for user_id, entries in left_users_rooms.items(): + if any(rid in joined_room_ids for rid in entries): + newly_left_users.discard(user_id) + + return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users) + async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: if not self.hs.is_mine(UserID.from_string(user_id)): raise SynapseError(400, "User is not hosted on this homeserver") diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6af2eeb75f..c44baa7042 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -86,7 +86,7 @@ from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext -from synapse.util.metrics import Measure, measure_func +from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -1779,8 +1779,15 @@ class SyncHandler: ) if include_device_list_updates: - device_lists = await self._generate_sync_entry_for_device_list( - sync_result_builder, + # include_device_list_updates can only be True if we have a + # since token. + assert since_token is not None + + device_lists = await self._device_handler.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=since_token, + now_token=sync_result_builder.now_token, + joined_room_ids=sync_result_builder.joined_room_ids, newly_joined_rooms=newly_joined_rooms, newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, newly_left_rooms=newly_left_rooms, @@ -1892,8 +1899,14 @@ class SyncHandler: newly_left_users, ) = sync_result_builder.calculate_user_changes() - device_lists = await self._generate_sync_entry_for_device_list( - sync_result_builder, + # include_device_list_updates can only be True if we have a + # since token. + assert since_token is not None + device_lists = await self._device_handler.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=since_token, + now_token=sync_result_builder.now_token, + joined_room_ids=sync_result_builder.joined_room_ids, newly_joined_rooms=newly_joined_rooms, newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, newly_left_rooms=newly_left_rooms, @@ -2070,94 +2083,6 @@ class SyncHandler: return sync_result_builder - @measure_func("_generate_sync_entry_for_device_list") - async def _generate_sync_entry_for_device_list( - self, - sync_result_builder: "SyncResultBuilder", - newly_joined_rooms: AbstractSet[str], - newly_joined_or_invited_or_knocked_users: AbstractSet[str], - newly_left_rooms: AbstractSet[str], - newly_left_users: AbstractSet[str], - ) -> DeviceListUpdates: - """Generate the DeviceListUpdates section of sync - - Args: - sync_result_builder - newly_joined_rooms: Set of rooms user has joined since previous sync - newly_joined_or_invited_or_knocked_users: Set of users that have joined, - been invited to a room or are knocking on a room since - previous sync. - newly_left_rooms: Set of rooms user has left since previous sync - newly_left_users: Set of users that have left a room we're in since - previous sync - """ - - user_id = sync_result_builder.sync_config.user.to_string() - since_token = sync_result_builder.since_token - assert since_token is not None - - # Take a copy since these fields will be mutated later. 
- newly_joined_or_invited_or_knocked_users = set( - newly_joined_or_invited_or_knocked_users - ) - newly_left_users = set(newly_left_users) - - # We want to figure out what user IDs the client should refetch - # device keys for, and which users we aren't going to track changes - # for anymore. - # - # For the first step we check: - # a. if any users we share a room with have updated their devices, - # and - # b. we also check if we've joined any new rooms, or if a user has - # joined a room we're in. - # - # For the second step we just find any users we no longer share a - # room with by looking at all users that have left a room plus users - # that were in a room we've left. - - users_that_have_changed = set() - - joined_room_ids = sync_result_builder.joined_room_ids - - # Step 1a, check for changes in devices of users we share a room - # with - users_that_have_changed = ( - await self._device_handler.get_device_changes_in_shared_rooms( - user_id, - joined_room_ids, - from_token=since_token, - now_token=sync_result_builder.now_token, - ) - ) - - # Step 1b, check for newly joined rooms - for room_id in newly_joined_rooms: - joined_users = await self.store.get_users_in_room(room_id) - newly_joined_or_invited_or_knocked_users.update(joined_users) - - # TODO: Check that these users are actually new, i.e. either they - # weren't in the previous sync *or* they left and rejoined. - users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) - - user_signatures_changed = await self.store.get_users_whose_signatures_changed( - user_id, since_token.device_list_key - ) - users_that_have_changed.update(user_signatures_changed) - - # Now find users that we no longer track - for room_id in newly_left_rooms: - left_users = await self.store.get_users_in_room(room_id) - newly_left_users.update(left_users) - - # Remove any users that we still share a room with. 
- left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) - for user_id, entries in left_users_rooms.items(): - if any(rid in joined_room_ids for rid in entries): - newly_left_users.discard(user_id) - - return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users) - @trace async def _generate_sync_entry_for_to_device( self, sync_result_builder: "SyncResultBuilder" diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 9ed39e688a..7d491d1728 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -166,6 +166,11 @@ class StateDeltasStore(SQLBaseStore): ) -> List[StateDelta]: """Get the state deltas between two tokens.""" + if not self._curr_state_delta_stream_cache.has_entity_changed( + room_id, from_token.stream + ): + return [] + def get_current_state_deltas_for_room_txn( txn: LoggingTransaction, ) -> List[StateDelta]: From 34b758644611721911a223814a7b35d8e14067e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:20:38 +0100 Subject: [PATCH 109/332] Bump types-requests from 2.31.0.20240406 to 2.32.0.20240712 (#17524) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 278bd6cb6e..f3b096660f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2901,13 +2901,13 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.20240406" +version = "2.32.0.20240712" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, + {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, + {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, ] [package.dependencies] From a640b318df37c889537bbf9207743dae2b4f390a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:31:34 +0100 Subject: [PATCH 110/332] Bump sigstore/cosign-installer from 3.5.0 to 3.6.0 (#17549) --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 06aaeb851f..1a97809a26 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,7 +30,7 @@ jobs: run: docker buildx inspect - name: Install Cosign - uses: sigstore/cosign-installer@v3.5.0 + uses: sigstore/cosign-installer@v3.6.0 - name: Checkout repository uses: actions/checkout@v4 From 4f7f6ee9a07795dd4efef232b5f372995f45eb60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:31:45 +0100 Subject: [PATCH 111/332] Bump lxml from 5.2.2 to 5.3.0 (#17550) --- poetry.lock | 284 ++++++++++++++++++++++++++-------------------------- 1 file changed, 140 insertions(+), 144 deletions(-) diff --git a/poetry.lock b/poetry.lock index f3b096660f..50fd0cbf76 100644 --- a/poetry.lock 
+++ b/poetry.lock @@ -998,153 +998,149 @@ pyasn1 = ">=0.4.6" [[package]] name = "lxml" -version = "5.2.2" +version = "5.3.0" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." optional = true python-versions = ">=3.6" files = [ - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"}, - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"}, - {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"}, - {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"}, - {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"}, - {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"}, - {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"}, - {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"}, - {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"}, - 
{file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"}, - {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"}, - {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"}, - {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, - {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, - {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, - {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, - {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"}, - {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"}, - {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"}, - {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = 
"sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"}, - {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"}, - {file = 
"lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"}, - {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"}, + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, + {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, + {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, + {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, + {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, + {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, + {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = 
"sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, + {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, + {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, + {file = 
"lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, + {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, + {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, + {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, + {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, + {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, + {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, + {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, + {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, + {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, + {file = 
"lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, + {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, + {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, + {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, + {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, + {file = 
"lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, + {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, ] [package.extras] @@ -1152,7 +1148,7 @@ cssselect = ["cssselect (>=0.7)"] html-clean = ["lxml-html-clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.10)"] +source = ["Cython (>=3.0.11)"] [[package]] name = "lxml-stubs" From b7faf01f268c135d24fc8d2fc621dbe6392b8b94 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:31:56 +0100 Subject: [PATCH 112/332] Bump phonenumbers from 8.13.42 to 8.13.43 (#17551) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 50fd0cbf76..0f2bde23a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1512,13 +1512,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.42" +version = "8.13.43" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.42-py2.py3-none-any.whl", hash = "sha256:18acc22ee03116d27b26e990f53806a1770a3e05f05e1620bc09ad187f889456"}, - {file = "phonenumbers-8.13.42.tar.gz", hash = "sha256:7137904f2db3b991701e853174ce8e1cb8f540b8bfdf27617540de04c0b7bed5"}, + {file = "phonenumbers-8.13.43-py2.py3-none-any.whl", hash = "sha256:339e521403fe4dd9c664dbbeb2fe434f9ea5c81e54c0fdfadbaeb53b26a76c27"}, + {file = "phonenumbers-8.13.43.tar.gz", hash = "sha256:35b904e4a79226eee027fbb467a9aa6f1ab9ffc3c09c91bf14b885c154936726"}, ] [[package]] From 8bbe65f3c039f18526fe46bb53291de5febe3a34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:32:05 +0100 Subject: [PATCH 113/332] Bump types-pyyaml from 6.0.12.20240311 to 6.0.12.20240808 (#17552) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0f2bde23a8..e2ab13f54d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2886,13 +2886,13 @@ types-cffi = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20240808" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, + {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, ] [[package]] From b076941a3641fc62c40e844317b56cc24acf8f02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:32:18 +0100 Subject: [PATCH 114/332] Bump sentry-sdk from 2.10.0 to 2.12.0 (#17553) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index e2ab13f54d..37c490fcfa 100644 --- a/poetry.lock +++ 
b/poetry.lock @@ -2414,13 +2414,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.10.0" +version = "2.12.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"}, - {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"}, + {file = "sentry_sdk-2.12.0-py2.py3-none-any.whl", hash = "sha256:7a8d5163d2ba5c5f4464628c6b68f85e86972f7c636acc78aed45c61b98b7a5e"}, + {file = "sentry_sdk-2.12.0.tar.gz", hash = "sha256:8763840497b817d44c49b3fe3f5f7388d083f2337ffedf008b2cdb63b5c86dc6"}, ] [package.dependencies] @@ -2450,7 +2450,7 @@ langchain = ["langchain (>=0.0.210)"] loguru = ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] -opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging (==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"] +opentelemetry-experimental = ["opentelemetry-distro"] 
pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] From 8c9f2743bc30b5420b80f88b389e69e3044cde85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:33:12 +0100 Subject: [PATCH 115/332] Bump serde_json from 1.0.122 to 1.0.124 (#17555) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce5520436d..c0b951a161 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "itoa", "memchr", From e1f5f0fbb87d39a7420fefd1809687a0c60bef7a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:58:01 +0100 Subject: [PATCH 116/332] Bump setuptools from 67.6.0 to 72.1.0 (#17542) --- changelog.d/17542.misc | 1 + poetry.lock | 25 +++++++------------------ 2 files changed, 8 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17542.misc diff --git a/changelog.d/17542.misc b/changelog.d/17542.misc new file mode 100644 index 0000000000..b5773115ca --- /dev/null +++ b/changelog.d/17542.misc @@ -0,0 +1 @@ +Bump setuptools from 67.6.0 to 72.1.0. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 37c490fcfa..d476973ead 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2099,7 +2099,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2107,16 +2106,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2133,7 +2124,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2141,7 +2131,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2488,19 +2477,19 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] name = "setuptools" -version = "67.6.0" +version = "72.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, - {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, + {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"}, + {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "setuptools-rust" From ff7b27013e14a4e8951e0cdb472bfd63da6e2a96 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 08:59:35 +0100 Subject: [PATCH 117/332] Bump serde from 
1.0.204 to 1.0.206 (#17556) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0b951a161..d74309a756 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ "proc-macro2", "quote", From 9f9ec92526fbc108d140ed57608ed5ae6c79604f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2024 14:06:17 +0100 Subject: [PATCH 118/332] Speed up responding to media requests (#17558) We do this by reading from a threadpool, rather than blocking the main thread. This is broadly what we do in the [S3 storage provider](https://github.com/matrix-org/synapse-s3-storage-provider/blob/main/s3_storage_provider.py#L234) --- changelog.d/17558.misc | 1 + synapse/http/server.py | 4 +- synapse/http/site.py | 2 +- synapse/media/_base.py | 141 +++++++++++++++++++++++++++++- synapse/media/media_storage.py | 19 ++-- synapse/media/storage_provider.py | 5 +- synapse/media/thumbnailer.py | 3 +- 7 files changed, 154 insertions(+), 21 deletions(-) create mode 100644 changelog.d/17558.misc diff --git a/changelog.d/17558.misc b/changelog.d/17558.misc new file mode 100644 index 0000000000..cfa8089a81 --- /dev/null +++ b/changelog.d/17558.misc @@ -0,0 +1 @@ +Speed up responding to media requests. 
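In outline, the mechanism the diffs below add is a push producer whose blocking file reads happen on a reactor threadpool, coordinated with the consumer through two `threading.Event`s. A stripped-down sketch of that pattern follows (illustrative only — the class and method names here are not Synapse's, and the real `ThreadedFileSender` below additionally handles timeouts, errors, and logcontexts):

    import threading
    from typing import BinaryIO

    from zope.interface import implementer

    from twisted.internet import interfaces, reactor
    from twisted.internet.threads import deferToThread


    @implementer(interfaces.IPushProducer)
    class SketchedFileSender:
        """Illustrative sketch: read a file on a threadpool, write on the reactor."""

        CHUNK_SIZE = 2**14

        def __init__(self) -> None:
            # Set means keep reading; clear means the consumer asked us to pause.
            self.wakeup = threading.Event()
            # Set means terminate the read loop (consumer gone or transfer done).
            self.stop = threading.Event()

        def begin(self, file: BinaryIO, consumer: interfaces.IConsumer) -> None:
            self.file = file
            self.consumer = consumer
            consumer.registerProducer(self, True)  # True => push producer
            self.wakeup.set()
            deferToThread(self._read_loop)  # loop runs in the reactor threadpool

        # IPushProducer callbacks, invoked by the consumer on the reactor thread.
        def resumeProducing(self) -> None:
            self.wakeup.set()

        def pauseProducing(self) -> None:
            self.wakeup.clear()

        def stopProducing(self) -> None:
            self.stop.set()
            self.wakeup.set()  # unblock the worker so it can observe the stop flag

        def _read_loop(self) -> None:
            # Runs on a worker thread, so blocking file I/O is acceptable here.
            try:
                while not self.stop.is_set():
                    self.wakeup.wait()  # block while the consumer is saturated
                    if self.stop.is_set():
                        return
                    chunk = self.file.read(self.CHUNK_SIZE)
                    if not chunk:
                        return  # EOF
                    # Consumers are not thread-safe: hand the chunk back to the
                    # reactor thread before writing it out.
                    reactor.callFromThread(self.consumer.write, chunk)
            finally:
                reactor.callFromThread(self.consumer.unregisterProducer)
                self.file.close()

The key design point is that Twisted consumers are not thread-safe, so every interaction with the consumer is bounced back onto the reactor thread via `callFromThread`, while only the blocking `read()` stays on the worker thread; the pair of events lets the reactor-side `pauseProducing`/`resumeProducing`/`stopProducing` callbacks steer the loop without holding locks.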
diff --git a/synapse/http/server.py b/synapse/http/server.py index 0d0c610b28..211795dc39 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -74,7 +74,6 @@ from synapse.api.errors import ( from synapse.config.homeserver import HomeServerConfig from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet -from synapse.types import ISynapseReactor from synapse.util import json_encoder from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable @@ -869,8 +868,7 @@ async def _async_write_json_to_request_in_thread( with start_active_span("encode_json_response"): span = active_span() - reactor: ISynapseReactor = request.reactor # type: ignore - json_str = await defer_to_thread(reactor, encode, span) + json_str = await defer_to_thread(request.reactor, encode, span) _write_bytes_to_request(request, json_str) diff --git a/synapse/http/site.py b/synapse/http/site.py index af169ba51e..8bf63edd36 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -658,7 +658,7 @@ class SynapseSite(ProxySite): ) self.site_tag = site_tag - self.reactor = reactor + self.reactor: ISynapseReactor = reactor assert config.http_options is not None proxied = config.http_options.x_forwarded diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 1b268ce4d4..21f334339b 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -22,12 +22,14 @@ import logging import os +import threading import urllib from abc import ABC, abstractmethod from types import TracebackType from typing import ( TYPE_CHECKING, Awaitable, + BinaryIO, Dict, Generator, List, @@ -37,15 +39,19 @@ from typing import ( ) import attr +from zope.interface import implementer +from twisted.internet import interfaces +from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender +from twisted.python.failure import Failure from twisted.web.server import Request from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import defer_to_thread, make_deferred_yieldable +from synapse.types import ISynapseReactor from synapse.util import Clock from synapse.util.stringutils import is_ascii @@ -138,7 +144,7 @@ async def respond_with_file( add_file_headers(request, media_type, file_size, upload_name) with open(file_path, "rb") as f: - await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) + await ThreadedFileSender(request.reactor).beginFileTransfer(f, request) finish_request(request) else: @@ -601,3 +607,132 @@ def _parseparam(s: bytes) -> Generator[bytes, None, None]: f = s[:end] yield f.strip() s = s[end:] + + +@implementer(interfaces.IPushProducer) +class ThreadedFileSender: + """ + A producer that sends the contents of a file to a consumer, reading from the + file on a thread. + + This works by spawning a loop in a threadpool that repeatedly reads from the + file and sends it to the consumer. The main thread communicates with the + loop via two `threading.Event`, which controls when to start/pause reading + and when to terminate. + """ + + # How much data to read in one go. + CHUNK_SIZE = 2**14 + + # How long we wait for the consumer to be ready again before aborting the + # read. 
+ TIMEOUT_SECONDS = 90.0 + + def __init__(self, reactor: ISynapseReactor) -> None: + self.reactor = reactor + + self.file: Optional[BinaryIO] = None + self.deferred: "Deferred[None]" = Deferred() + self.consumer: Optional[interfaces.IConsumer] = None + + # Signals if the thread should keep reading/sending data. Set means + # continue, clear means pause. + self.wakeup_event = threading.Event() + + # Signals if the thread should terminate, e.g. because the consumer has + # gone away. Both this and `wakeup_event` should be set to terminate the + # loop (otherwise the thread will block on `wakeup_event`). + self.stop_event = threading.Event() + + def beginFileTransfer( + self, file: BinaryIO, consumer: interfaces.IConsumer + ) -> "Deferred[None]": + """ + Begin transferring a file + """ + self.file = file + self.consumer = consumer + + self.consumer.registerProducer(self, True) + + # We set the wakeup signal as we should start producing immediately. + self.wakeup_event.set() + defer_to_thread(self.reactor, self._on_thread_read_loop) + + return make_deferred_yieldable(self.deferred) + + def resumeProducing(self) -> None: + """interfaces.IPushProducer""" + self.wakeup_event.set() + + def pauseProducing(self) -> None: + """interfaces.IPushProducer""" + self.wakeup_event.clear() + + def stopProducing(self) -> None: + """interfaces.IPushProducer""" + + # Terminate the thread loop. + self.wakeup_event.set() + self.stop_event.set() + + if not self.deferred.called: + self.deferred.errback(Exception("Consumer asked us to stop producing")) + + def _on_thread_read_loop(self) -> None: + """This is the loop that happens on a thread.""" + + try: + while not self.stop_event.is_set(): + # We wait for the producer to signal that the consumer wants + # more data (or we should abort) + if not self.wakeup_event.is_set(): + ret = self.wakeup_event.wait(self.TIMEOUT_SECONDS) + if not ret: + raise Exception("Timed out waiting to resume") + + # Check if we were woken up so that we abort the download + if self.stop_event.is_set(): + return + + # The file should always have been set before we get here. 
+ assert self.file is not None + + chunk = self.file.read(self.CHUNK_SIZE) + if not chunk: + return + + self.reactor.callFromThread(self._write, chunk) + + except Exception: + self.reactor.callFromThread(self._error, Failure()) + finally: + self.reactor.callFromThread(self._finish) + + def _write(self, chunk: bytes) -> None: + """Called from the thread to write a chunk of data""" + if self.consumer: + self.consumer.write(chunk) + + def _error(self, failure: Failure) -> None: + """Called from the thread when there was a fatal error""" + if self.consumer: + self.consumer.unregisterProducer() + self.consumer = None + + if not self.deferred.called: + self.deferred.errback(failure) + + def _finish(self) -> None: + """Called from the thread when it finishes (either on success or + failure).""" + if self.file: + self.file.close() + self.file = None + + if self.consumer: + self.consumer.unregisterProducer() + self.consumer = None + + if not self.deferred.called: + self.deferred.callback(None) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index 2a106bb0eb..e06273c92f 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -49,19 +49,15 @@ from zope.interface import implementer from twisted.internet import interfaces from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender from synapse.api.errors import NotFoundError -from synapse.logging.context import ( - defer_to_thread, - make_deferred_yieldable, - run_in_background, -) +from synapse.logging.context import defer_to_thread, run_in_background from synapse.logging.opentracing import start_active_span, trace, trace_with_opname +from synapse.media._base import ThreadedFileSender from synapse.util import Clock from synapse.util.file_consumer import BackgroundFileConsumer -from ..types import JsonDict +from ..types import ISynapseReactor, JsonDict from ._base import FileInfo, Responder from .filepath import MediaFilePaths @@ -213,7 +209,7 @@ class MediaStorage: local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): logger.debug("responding with local file %s", local_path) - return FileResponder(open(local_path, "rb")) + return FileResponder(self.reactor, open(local_path, "rb")) logger.debug("local file %s did not exist", local_path) for provider in self.storage_providers: @@ -336,12 +332,13 @@ class FileResponder(Responder): is closed when finished streaming. 
""" - def __init__(self, open_file: IO): + def __init__(self, reactor: ISynapseReactor, open_file: BinaryIO): + self.reactor = reactor self.open_file = open_file def write_to_consumer(self, consumer: IConsumer) -> Deferred: - return make_deferred_yieldable( - FileSender().beginFileTransfer(self.open_file, consumer) + return ThreadedFileSender(self.reactor).beginFileTransfer( + self.open_file, consumer ) def __exit__( diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py index 06e5d27a53..355df999d2 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py @@ -145,6 +145,7 @@ class FileStorageProviderBackend(StorageProvider): def __init__(self, hs: "HomeServer", config: str): self.hs = hs + self.reactor = hs.get_reactor() self.cache_directory = hs.config.media.media_store_path self.base_directory = config @@ -165,7 +166,7 @@ class FileStorageProviderBackend(StorageProvider): shutil_copyfile: Callable[[str, str], str] = shutil.copyfile with start_active_span("shutil_copyfile"): await defer_to_thread( - self.hs.get_reactor(), + self.reactor, shutil_copyfile, primary_fname, backup_fname, @@ -177,7 +178,7 @@ class FileStorageProviderBackend(StorageProvider): backup_fname = os.path.join(self.base_directory, path) if os.path.isfile(backup_fname): - return FileResponder(open(backup_fname, "rb")) + return FileResponder(self.reactor, open(backup_fname, "rb")) return None diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index ef6aa8ccf5..3380315b27 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -259,6 +259,7 @@ class ThumbnailProvider: media_storage: MediaStorage, ): self.hs = hs + self.reactor = hs.get_reactor() self.media_repo = media_repo self.media_storage = media_storage self.store = hs.get_datastores().main @@ -373,7 +374,7 @@ class ThumbnailProvider: await respond_with_multipart_responder( self.hs.get_clock(), request, - FileResponder(open(file_path, "rb")), + FileResponder(self.reactor, open(file_path, "rb")), media_info, ) else: From bab37dfc6fe81ebb4995859ebe2c09d68a12a0c9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2024 14:37:01 +0100 Subject: [PATCH 119/332] 1.113.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c40c11f98c..7b71e32e23 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.113.0 (2024-08-13) + +No significant changes since 1.113.0rc1. + + + + # Synapse 1.113.0rc1 (2024-08-06) ### Features diff --git a/debian/changelog b/debian/changelog index e89fdb98dc..2692ab621d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.113.0) stable; urgency=medium + + * New Synapse release 1.113.0. + + -- Synapse Packaging team Tue, 13 Aug 2024 14:36:56 +0100 + matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium * New Synapse release 1.113.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index c29d1534fb..82369f9052 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.113.0rc1" +version = "1.113.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 3e7eb45eb1bc6d02a6cd439f2829a63ac4835b55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2024 15:00:57 +0100 Subject: [PATCH 120/332] Fixup media logcontexts (#17561) Regression from #17558 --- changelog.d/17561.misc | 1 + synapse/media/_base.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17561.misc diff --git a/changelog.d/17561.misc b/changelog.d/17561.misc new file mode 100644 index 0000000000..cfa8089a81 --- /dev/null +++ b/changelog.d/17561.misc @@ -0,0 +1 @@ +Speed up responding to media requests. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 21f334339b..ad80098e9f 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -50,7 +50,11 @@ from twisted.web.server import Request from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest -from synapse.logging.context import defer_to_thread, make_deferred_yieldable +from synapse.logging.context import ( + defer_to_thread, + make_deferred_yieldable, + run_in_background, +) from synapse.types import ISynapseReactor from synapse.util import Clock from synapse.util.stringutils import is_ascii @@ -657,7 +661,7 @@ class ThreadedFileSender: # We set the wakeup signal as we should start producing immediately. self.wakeup_event.set() - defer_to_thread(self.reactor, self._on_thread_read_loop) + run_in_background(defer_to_thread, self.reactor, self._on_thread_read_loop) return make_deferred_yieldable(self.deferred) @@ -672,6 +676,9 @@ class ThreadedFileSender: def stopProducing(self) -> None: """interfaces.IPushProducer""" + # Unregister the consumer so we don't try and interact with it again. + self.consumer = None + # Terminate the thread loop. self.wakeup_event.set() self.stop_event.set() From aaa3c3642020c4035f99066e6f96cb4015972abe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2024 15:56:18 +0100 Subject: [PATCH 121/332] Remove logging in multipart (#17563) This is really spurious and causes a lot of spam. I don't think there is a use for it even at DEBUG level. --- changelog.d/17563.misc | 1 + synapse/http/client.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/17563.misc diff --git a/changelog.d/17563.misc b/changelog.d/17563.misc new file mode 100644 index 0000000000..672764ab82 --- /dev/null +++ b/changelog.d/17563.misc @@ -0,0 +1 @@ +Reduce log spam of multipart files. diff --git a/synapse/http/client.py b/synapse/http/client.py index 56ad28eabf..daa5cc899b 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1088,7 +1088,6 @@ class _MultipartParserProtocol(protocol.Protocol): return # otherwise we are in the file part else: - logger.info("Writing multipart file data to stream") try: self.stream.write(data[start:end]) except Exception as e: From 8fea190a1f41238f09ba2270aebc2f83add817ac Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 13 Aug 2024 12:04:35 -0400 Subject: [PATCH 122/332] Add missing docstrings related to profile methods. 
(#17559) --- changelog.d/17559.doc | 1 + synapse/handlers/profile.py | 35 +++++++++++++++++++++++ synapse/storage/databases/main/profile.py | 34 ++++++++++++++++++++++ 3 files changed, 70 insertions(+) create mode 100644 changelog.d/17559.doc diff --git a/changelog.d/17559.doc b/changelog.d/17559.doc new file mode 100644 index 0000000000..e54a122b74 --- /dev/null +++ b/changelog.d/17559.doc @@ -0,0 +1 @@ +Improve docstrings for profile methods. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6663d4b271..af8cd838ee 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -74,6 +74,17 @@ class ProfileHandler: self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: + """ + Get a user's profile as a JSON dictionary. + + Args: + user_id: The user to fetch the profile of. + ignore_backoff: True to ignore backoff when fetching over federation. + + Returns: + A JSON dictionary. For local queries this will include the displayname and avatar_url + fields. For remote queries it may contain arbitrary information. + """ target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): @@ -107,6 +118,15 @@ class ProfileHandler: raise e.to_synapse_error() async def get_displayname(self, target_user: UserID) -> Optional[str]: + """ + Fetch a user's display name from their profile. + + Args: + target_user: The user to fetch the display name of. + + Returns: + The user's display name or None if unset. + """ if self.hs.is_mine(target_user): try: displayname = await self.store.get_profile_displayname(target_user) @@ -203,6 +223,15 @@ class ProfileHandler: await self._update_join_states(requester, target_user) async def get_avatar_url(self, target_user: UserID) -> Optional[str]: + """ + Fetch a user's avatar URL from their profile. + + Args: + target_user: The user to fetch the avatar URL of. + + Returns: + The user's avatar URL or None if unset. + """ if self.hs.is_mine(target_user): try: avatar_url = await self.store.get_profile_avatar_url(target_user) @@ -403,6 +432,12 @@ class ProfileHandler: async def _update_join_states( self, requester: Requester, target_user: UserID ) -> None: + """ + Update the membership events of each room the user is joined to with the + new profile information. + + Note that this stomps over any custom display name or avatar URL in member events. + """ if not self.hs.is_mine(target_user): return diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 996aea808d..41cf08211f 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -144,6 +144,16 @@ class ProfileWorkerStore(SQLBaseStore): return 50 async def get_profileinfo(self, user_id: UserID) -> ProfileInfo: + """ + Fetch the display name and avatar URL of a user. + + Args: + user_id: The user ID to fetch the profile for. + + Returns: + The user's display name and avatar URL. Values may be null if unset + or if the user doesn't exist. + """ profile = await self.db_pool.simple_select_one( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -158,6 +168,15 @@ class ProfileWorkerStore(SQLBaseStore): return ProfileInfo(avatar_url=profile[1], display_name=profile[0]) async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: + """ + Fetch the display name of a user. + + Args: + user_id: The user to get the display name for. 
+ + Raises: + 404 if the user does not exist. + """ return await self.db_pool.simple_select_one_onecol( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -166,6 +185,15 @@ class ProfileWorkerStore(SQLBaseStore): ) async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]: + """ + Fetch the avatar URL of a user. + + Args: + user_id: The user to get the avatar URL for. + + Raises: + 404 if the user does not exist. + """ return await self.db_pool.simple_select_one_onecol( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -174,6 +202,12 @@ class ProfileWorkerStore(SQLBaseStore): ) async def create_profile(self, user_id: UserID) -> None: + """ + Create a blank profile for a user. + + Args: + user_id: The user to create the profile for. + """ user_localpart = user_id.localpart await self.db_pool.simple_insert( table="profiles", From 6a11bdf01d5f2de758162f345c83c3ae4004e3d8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 13 Aug 2024 17:55:05 +0100 Subject: [PATCH 123/332] Add a utility function for generating fake event IDs (#17557) --- changelog.d/17557.misc | 1 + synapse/rest/client/room.py | 9 +++++---- synapse/util/events.py | 29 +++++++++++++++++++++++++++++ tests/handlers/test_federation.py | 6 +----- 4 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17557.misc create mode 100644 synapse/util/events.py diff --git a/changelog.d/17557.misc b/changelog.d/17557.misc new file mode 100644 index 0000000000..535f4b6e5f --- /dev/null +++ b/changelog.d/17557.misc @@ -0,0 +1 @@ +Add a utility function for generating random event IDs. \ No newline at end of file diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 903c74f6d8..7d57904d69 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -67,7 +67,8 @@ from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util.cancellation import cancellable -from synapse.util.stringutils import parse_and_validate_server_name, random_string +from synapse.util.events import generate_fake_event_id +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.server import HomeServer @@ -325,7 +326,7 @@ class RoomStateEventRestServlet(RestServlet): ) event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) ret = {"event_id": event_id} @@ -377,7 +378,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) return 200, {"event_id": event_id} @@ -1193,7 +1194,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) return 200, {"event_id": event_id} diff --git a/synapse/util/events.py b/synapse/util/events.py new file mode 100644 index 0000000000..ad9b946578 --- /dev/null +++ b/synapse/util/events.py @@ -0,0 +1,29 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. 
+# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# + +from synapse.util.stringutils import random_string + + +def generate_fake_event_id() -> str: + """ + Generate an event ID from random ASCII characters. + + This is primarily useful for generating fake event IDs in response to + requests from shadow-banned users. + + Returns: + A string intended to look like an event ID, but with no actual meaning. + """ + return "$" + random_string(43) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 3fe5b0a1b4..9847893fce 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -44,7 +44,7 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.storage.databases.main.events_worker import EventCacheEntry from synapse.util import Clock -from synapse.util.stringutils import random_string +from synapse.util.events import generate_fake_event_id from tests import unittest from tests.test_utils import event_injection @@ -52,10 +52,6 @@ from tests.test_utils import event_injection logger = logging.getLogger(__name__) -def generate_fake_event_id() -> str: - return "$fake_" + random_string(43) - - class FederationTestCase(unittest.FederatingHomeserverTestCase): servlets = [ admin.register_servlets, From a9fc1fd112162a50e361768788523c46d2655dc7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2024 17:59:47 +0100 Subject: [PATCH 124/332] Use a larger, dedicated threadpool for media sending (#17564) --- changelog.d/17564.misc | 1 + synapse/media/_base.py | 19 +++++++++++++------ synapse/media/media_storage.py | 12 +++++------- synapse/media/storage_provider.py | 2 +- synapse/media/thumbnailer.py | 6 +++--- synapse/server.py | 19 +++++++++++++++++++ tests/server.py | 6 ++++++ 7 files changed, 48 insertions(+), 17 deletions(-) create mode 100644 changelog.d/17564.misc diff --git a/changelog.d/17564.misc b/changelog.d/17564.misc new file mode 100644 index 0000000000..cfa8089a81 --- /dev/null +++ b/changelog.d/17564.misc @@ -0,0 +1 @@ +Speed up responding to media requests. 
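The idea in this patch — pushing blocking file IO onto a dedicated, generously sized threadpool so that media traffic cannot starve the reactor's shared pool — can be sketched with plain Twisted APIs. This is a minimal illustration only: it uses Twisted's stock `deferToThreadPool` rather than the logcontext-aware `defer_to_threadpool` wrapper that the diffs below use, and the file path is a placeholder.

```python
from twisted.internet import reactor
from twisted.internet.threads import deferToThreadPool
from twisted.python.threadpool import ThreadPool

# A large pool is fine here: the work is almost entirely blocking file IO,
# not CPU, so many threads can be parked in read() at once.
media_threadpool = ThreadPool(minthreads=1, maxthreads=50, name="media_threadpool")
media_threadpool.start()
reactor.addSystemEventTrigger("during", "shutdown", media_threadpool.stop)


def read_chunk(path: str, size: int = 64 * 1024) -> bytes:
    # Blocking read; this runs on a pool thread, never on the reactor thread.
    with open(path, "rb") as f:
        return f.read(size)


# Returns a Deferred that fires with the bytes once the pool thread is done.
# "/tmp/example.bin" is a hypothetical path for illustration.
d = deferToThreadPool(reactor, media_threadpool, read_chunk, "/tmp/example.bin")
```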
diff --git a/synapse/media/_base.py b/synapse/media/_base.py index ad80098e9f..89dea39163 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -51,15 +51,15 @@ from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest from synapse.logging.context import ( - defer_to_thread, + defer_to_threadpool, make_deferred_yieldable, run_in_background, ) -from synapse.types import ISynapseReactor from synapse.util import Clock from synapse.util.stringutils import is_ascii if TYPE_CHECKING: + from synapse.server import HomeServer from synapse.storage.databases.main.media_repository import LocalMedia @@ -132,6 +132,7 @@ def respond_404(request: SynapseRequest) -> None: async def respond_with_file( + hs: "HomeServer", request: SynapseRequest, media_type: str, file_path: str, @@ -148,7 +149,7 @@ async def respond_with_file( add_file_headers(request, media_type, file_size, upload_name) with open(file_path, "rb") as f: - await ThreadedFileSender(request.reactor).beginFileTransfer(f, request) + await ThreadedFileSender(hs).beginFileTransfer(f, request) finish_request(request) else: @@ -632,8 +633,9 @@ class ThreadedFileSender: # read. TIMEOUT_SECONDS = 90.0 - def __init__(self, reactor: ISynapseReactor) -> None: - self.reactor = reactor + def __init__(self, hs: "HomeServer") -> None: + self.reactor = hs.get_reactor() + self.thread_pool = hs.get_media_sender_thread_pool() self.file: Optional[BinaryIO] = None self.deferred: "Deferred[None]" = Deferred() @@ -661,7 +663,12 @@ class ThreadedFileSender: # We set the wakeup signal as we should start producing immediately. self.wakeup_event.set() - run_in_background(defer_to_thread, self.reactor, self._on_thread_read_loop) + run_in_background( + defer_to_threadpool, + self.reactor, + self.thread_pool, + self._on_thread_read_loop, + ) return make_deferred_yieldable(self.deferred) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index e06273c92f..cf4208eb71 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -57,7 +57,7 @@ from synapse.media._base import ThreadedFileSender from synapse.util import Clock from synapse.util.file_consumer import BackgroundFileConsumer -from ..types import ISynapseReactor, JsonDict +from ..types import JsonDict from ._base import FileInfo, Responder from .filepath import MediaFilePaths @@ -209,7 +209,7 @@ class MediaStorage: local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): logger.debug("responding with local file %s", local_path) - return FileResponder(self.reactor, open(local_path, "rb")) + return FileResponder(self.hs, open(local_path, "rb")) logger.debug("local file %s did not exist", local_path) for provider in self.storage_providers: @@ -332,14 +332,12 @@ class FileResponder(Responder): is closed when finished streaming. 
""" - def __init__(self, reactor: ISynapseReactor, open_file: BinaryIO): - self.reactor = reactor + def __init__(self, hs: "HomeServer", open_file: BinaryIO): + self.hs = hs self.open_file = open_file def write_to_consumer(self, consumer: IConsumer) -> Deferred: - return ThreadedFileSender(self.reactor).beginFileTransfer( - self.open_file, consumer - ) + return ThreadedFileSender(self.hs).beginFileTransfer(self.open_file, consumer) def __exit__( self, diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py index 355df999d2..300952025a 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py @@ -178,7 +178,7 @@ class FileStorageProviderBackend(StorageProvider): backup_fname = os.path.join(self.base_directory, path) if os.path.isfile(backup_fname): - return FileResponder(self.reactor, open(backup_fname, "rb")) + return FileResponder(self.hs, open(backup_fname, "rb")) return None diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 3380315b27..042851021c 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -374,11 +374,11 @@ class ThumbnailProvider: await respond_with_multipart_responder( self.hs.get_clock(), request, - FileResponder(self.reactor, open(file_path, "rb")), + FileResponder(self.hs, open(file_path, "rb")), media_info, ) else: - await respond_with_file(request, desired_type, file_path) + await respond_with_file(self.hs, request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") raise SynapseError(400, "Failed to generate thumbnail.") @@ -456,7 +456,7 @@ class ThumbnailProvider: ) if file_path: - await respond_with_file(request, desired_type, file_path) + await respond_with_file(self.hs, request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") raise SynapseError(400, "Failed to generate thumbnail.") diff --git a/synapse/server.py b/synapse/server.py index 46b9d83a04..8b07bb39a0 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -34,6 +34,7 @@ from typing_extensions import TypeAlias from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port +from twisted.python.threadpool import ThreadPool from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource @@ -941,3 +942,21 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_task_scheduler(self) -> TaskScheduler: return TaskScheduler(self) + + @cache_in_self + def get_media_sender_thread_pool(self) -> ThreadPool: + """Fetch the threadpool used to read files when responding to media + download requests.""" + + # We can choose a large threadpool size as these threads predominately + # do IO rather than CPU work. + media_threadpool = ThreadPool( + name="media_threadpool", minthreads=1, maxthreads=50 + ) + + media_threadpool.start() + self.get_reactor().addSystemEventTrigger( + "during", "shutdown", media_threadpool.stop + ) + + return media_threadpool diff --git a/tests/server.py b/tests/server.py index 3e377585ce..95aff6f66c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1166,6 +1166,12 @@ def setup_test_homeserver( hs.get_auth_handler().validate_hash = validate_hash # type: ignore[assignment] + # We need to replace the media threadpool with the fake test threadpool. 
+    def thread_pool() -> threadpool.ThreadPool:
+        return reactor.getThreadPool()
+
+    hs.get_media_sender_thread_pool = thread_pool  # type: ignore[method-assign]
+
     # Load any configured modules into the homeserver
     module_api = hs.get_module_api()
     for module, module_config in hs.config.modules.loaded_modules:

From a308d99f30d7e660115e355c54c37ac149cdbe53 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 13 Aug 2024 12:27:42 -0500
Subject: [PATCH 125/332] Sliding Sync: Exclude partially stated rooms if we must await full state (#17538)

Previously, we just had very basic partial room exclusion based on
whether we were lazy-loading room members. Now with this PR, we added
`must_await_full_state(...)` with rules to check whether we're only
requesting `required_state` that is completely satisfied even with
partial state.

Partially-stated rooms should have all state events except for remote
membership events so if we require a remote membership event anywhere,
then we need to return `True`.

---
 changelog.d/17538.bugfix                      |   1 +
 synapse/handlers/sliding_sync.py              | 104 +++++++--
 .../sliding_sync/test_rooms_required_state.py | 217 ++++++++++++++----
 3 files changed, 266 insertions(+), 56 deletions(-)
 create mode 100644 changelog.d/17538.bugfix

diff --git a/changelog.d/17538.bugfix b/changelog.d/17538.bugfix
new file mode 100644
index 0000000000..9e4e31dbdb
--- /dev/null
+++ b/changelog.d/17538.bugfix
@@ -0,0 +1 @@
+Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 18a96843be..99510254f3 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -24,6 +24,7 @@ from itertools import chain
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Dict,
     Final,
     List,
@@ -366,6 +367,73 @@ class RoomSyncConfig:
         else:
             self.required_state_map[state_type].add(state_key)
 
+    def must_await_full_state(
+        self,
+        is_mine_id: Callable[[str], bool],
+    ) -> bool:
+        """
+        Check if we're only requesting `required_state` that is completely
+        satisfied even with partial state; if so, we don't need to `await_full_state`
+        before we can return it.
+
+        Also see `StateFilter.must_await_full_state(...)` for comparison.
+
+        Partially-stated rooms should have all state events except for remote membership
+        events so if we require a remote membership event anywhere, then we need to
+        return `True` (requires full state).
+
+        Args:
+            is_mine_id: a callable which confirms if a given state_key matches an mxid
+                of a local user
+        """
+        wildcard_state_keys = self.required_state_map.get(StateValues.WILDCARD)
+        # Requesting *all* state in the room so we have to wait
+        if (
+            wildcard_state_keys is not None
+            and StateValues.WILDCARD in wildcard_state_keys
+        ):
+            return True
+
+        # If the wildcards don't refer to remote user IDs, then we don't need to wait
+        # for full state.
+        if wildcard_state_keys is not None:
+            for possible_user_id in wildcard_state_keys:
+                if not possible_user_id[0].startswith(UserID.SIGIL):
+                    # Not a user ID
+                    continue
+
+                localpart_hostname = possible_user_id.split(":", 1)
+                if len(localpart_hostname) < 2:
+                    # Not a user ID
+                    continue
+
+                if not is_mine_id(possible_user_id):
+                    return True
+
+        membership_state_keys = self.required_state_map.get(EventTypes.Member)
+        # We aren't requesting any membership events at all so the partial state will
+        # cover us.
+ if membership_state_keys is None: + return False + + # If we're requesting entirely local users, the partial state will cover us. + for user_id in membership_state_keys: + if user_id == StateValues.ME: + continue + # We're lazy-loading membership so we can just return the state we have. + # Lazy-loading means we include membership for any event `sender` in the + # timeline but since we had to auth those timeline events, we will have the + # membership state for them (including from remote senders). + elif user_id == StateValues.LAZY: + continue + elif user_id == StateValues.WILDCARD: + return False + elif not is_mine_id(user_id): + return True + + # Local users only so the partial state will cover us. + return False + class StateValues: """ @@ -395,6 +463,7 @@ class SlidingSyncHandler: self.device_handler = hs.get_device_handler() self.push_rules_handler = hs.get_push_rules_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync + self.is_mine_id = hs.is_mine_id self.connection_store = SlidingSyncConnectionStore() @@ -575,19 +644,10 @@ class SlidingSyncHandler: # Since creating the `RoomSyncConfig` takes some work, let's just do it # once and make a copy whenever we need it. room_sync_config = RoomSyncConfig.from_room_config(list_config) - membership_state_keys = room_sync_config.required_state_map.get( - EventTypes.Member - ) - # Also see `StateFilter.must_await_full_state(...)` for comparison - lazy_loading = ( - membership_state_keys is not None - and StateValues.LAZY in membership_state_keys - ) - if not lazy_loading: - # Exclude partially-stated rooms unless the `required_state` - # only has `["m.room.member", "$LAZY"]` for membership - # (lazy-loading room members). + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): filtered_sync_room_map = { room_id: room for room_id, room in filtered_sync_room_map.items() @@ -654,6 +714,12 @@ class SlidingSyncHandler: # Handle room subscriptions if has_room_subscriptions and sync_config.room_subscriptions is not None: with start_active_span("assemble_room_subscriptions"): + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_room_map = await self.store.is_partial_state_room_batched( + sync_config.room_subscriptions.keys() + ) + for ( room_id, room_subscription, @@ -677,12 +743,20 @@ class SlidingSyncHandler: ) # Take the superset of the `RoomSyncConfig` for each room. - # - # Update our `relevant_room_map` with the room we're going to display - # and need to fetch more info about. room_sync_config = RoomSyncConfig.from_room_config( room_subscription ) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + if partial_state_room_map.get(room_id): + continue + + all_rooms.add(room_id) + + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. 
existing_room_sync_config = relevant_room_map.get(room_id) if existing_room_sync_config is not None: existing_room_sync_config.combine_room_sync_config( diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index a13cad223f..823e7db569 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -631,8 +631,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): def test_rooms_required_state_partial_state(self) -> None: """ - Test partially-stated room are excluded unless `rooms.required_state` is - lazy-loading room members. + Test partially-stated room are excluded if they require full state. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -649,59 +648,195 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2) ) - # Make the Sliding Sync request (NOT lazy-loading room members) + # Make the Sliding Sync request with examples where `must_await_full_state()` is + # `False` sync_body = { "lists": { - "foo-list": { + "no-state-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + "other-state-list": { "ranges": [[0, 1]], "required_state": [ [EventTypes.Create, ""], ], "timeline_limit": 0, }, + "lazy-load-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + # Lazy-load room members + [EventTypes.Member, StateValues.LAZY], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "local-members-only-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, user1_id], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "me-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, StateValues.ME], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "wildcard-type-local-state-key-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", user1_id], + # Not a user ID + ["*", "foobarbaz"], + # Not a user ID + ["*", "foo.bar.baz"], + # Not a user ID + ["*", "@foo"], + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # The list should include both rooms now because we don't need full state + for list_key in response_body["lists"].keys(): + self.assertIncludes( + set(response_body["lists"][list_key]["ops"][0]["room_ids"]), + {room_id2, room_id1}, + exact=True, + message=f"Expected all rooms to show up for list_key={list_key}. 
Response " + + str(response_body["lists"][list_key]), + ) + + # Take each of the list variants and apply them to room subscriptions to make + # sure the same rules apply + for list_key in sync_body["lists"].keys(): + sync_body_for_subscriptions = { + "room_subscriptions": { + room_id1: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + room_id2: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok) + + self.assertIncludes( + set(response_body["rooms"].keys()), + {room_id2, room_id1}, + exact=True, + message=f"Expected all rooms to show up for test_key={list_key}.", + ) + + # ===================================================================== + + # Make the Sliding Sync request with examples where `must_await_full_state()` is + # `True` + sync_body = { + "lists": { + "wildcard-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", "*"], + ], + "timeline_limit": 0, + }, + "wildcard-type-remote-state-key-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", "@some:remote"], + # Not a user ID + ["*", "foobarbaz"], + # Not a user ID + ["*", "foo.bar.baz"], + # Not a user ID + ["*", "@foo"], + ], + "timeline_limit": 0, + }, + "remote-member-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, user1_id], + # Remote member + [EventTypes.Member, "@some:remote"], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "lazy-but-remote-member-list": { + "ranges": [[0, 1]], + "required_state": [ + # Lazy-load room members + [EventTypes.Member, StateValues.LAZY], + # Remote member + [EventTypes.Member, "@some:remote"], + ], + "timeline_limit": 0, + }, } } response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure the list includes room1 but room2 is excluded because it's still # partially-stated - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id1], - } - ], - response_body["lists"]["foo-list"], - ) + for list_key in response_body["lists"].keys(): + self.assertIncludes( + set(response_body["lists"][list_key]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + message=f"Expected only fully-stated rooms to show up for list_key={list_key}. 
Response " + + str(response_body["lists"][list_key]), + ) - # Make the Sliding Sync request (with lazy-loading room members) - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - # Lazy-load room members - [EventTypes.Member, StateValues.LAZY], - ], - "timeline_limit": 0, - }, + # Take each of the list variants and apply them to room subscriptions to make + # sure the same rules apply + for list_key in sync_body["lists"].keys(): + sync_body_for_subscriptions = { + "room_subscriptions": { + room_id1: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + room_id2: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + } } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) + response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok) - # The list should include both rooms now because we're lazy-loading room members - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id2, room_id1], - } - ], - response_body["lists"]["foo-list"], - ) + self.assertIncludes( + set(response_body["rooms"].keys()), + {room_id1}, + exact=True, + message=f"Expected only fully-stated rooms to show up for test_key={list_key}.", + ) From b05b2e14bbba0041e7818213b0885ec65540e617 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 14 Aug 2024 01:49:01 -0700 Subject: [PATCH 126/332] Handle lower-case http headers in `_Mulitpart_Parser_Protocol` (#17545) --- changelog.d/17545.bugfix | 1 + synapse/http/client.py | 6 +++--- tests/http/test_client.py | 42 +++++++++++++++++++++++++++++++-------- 3 files changed, 38 insertions(+), 11 deletions(-) create mode 100644 changelog.d/17545.bugfix diff --git a/changelog.d/17545.bugfix b/changelog.d/17545.bugfix new file mode 100644 index 0000000000..31e22d873e --- /dev/null +++ b/changelog.d/17545.bugfix @@ -0,0 +1 @@ +Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. 
\ No newline at end of file diff --git a/synapse/http/client.py b/synapse/http/client.py index daa5cc899b..cb4f72d771 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1057,11 +1057,11 @@ class _MultipartParserProtocol(protocol.Protocol): if not self.parser: def on_header_field(data: bytes, start: int, end: int) -> None: - if data[start:end] == b"Location": + if data[start:end].lower() == b"location": self.has_redirect = True - if data[start:end] == b"Content-Disposition": + if data[start:end].lower() == b"content-disposition": self.in_disposition = True - if data[start:end] == b"Content-Type": + if data[start:end].lower() == b"content-type": self.in_content_type = True def on_header_value(data: bytes, start: int, end: int) -> None: diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 721917f957..f2abec190b 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -49,8 +49,11 @@ from tests.unittest import TestCase class ReadMultipartResponseTests(TestCase): - data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_" - data2 = b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + multipart_response_data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_" + multipart_response_data2 = ( + b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + ) + multipart_response_data_cased = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\ncOntEnt-type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-tyPe: text/plain\r\nconTent-dispOsition: inline; filename=test_upload\r\n\r\nfile_" redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" @@ -103,8 +106,31 @@ class ReadMultipartResponseTests(TestCase): result, deferred, protocol = self._build_multipart_response(249, 250) # Start sending data. - protocol.dataReceived(self.data1) - protocol.dataReceived(self.data2) + protocol.dataReceived(self.multipart_response_data1) + protocol.dataReceived(self.multipart_response_data2) + # Close the connection. + protocol.connectionLost(Failure(ResponseDone())) + + multipart_response: MultipartResponse = deferred.result # type: ignore[assignment] + + self.assertEqual(multipart_response.json, b"{}") + self.assertEqual(result.getvalue(), b"file_to_stream") + self.assertEqual(multipart_response.length, len(b"file_to_stream")) + self.assertEqual(multipart_response.content_type, b"text/plain") + self.assertEqual( + multipart_response.disposition, b"inline; filename=test_upload" + ) + + def test_parse_file_lowercase_headers(self) -> None: + """ + Check that a multipart response containing a file is properly parsed + into the json/file parts, and the json and file are properly captured if the http headers are lowercased + """ + result, deferred, protocol = self._build_multipart_response(249, 250) + + # Start sending data. + protocol.dataReceived(self.multipart_response_data_cased) + protocol.dataReceived(self.multipart_response_data2) # Close the connection. 
protocol.connectionLost(Failure(ResponseDone()))
@@ -143,7 +169,7 @@ class ReadMultipartResponseTests(TestCase):
         result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
 
         self.assertEqual(result.getvalue(), b"file_")
         self._assert_error(deferred, protocol)
@@ -154,11 +180,11 @@ class ReadMultipartResponseTests(TestCase):
         result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
 
         self._assert_error(deferred, protocol)
 
         # More data might have come in.
-        protocol.dataReceived(self.data2)
+        protocol.dataReceived(self.multipart_response_data2)
 
         self.assertEqual(result.getvalue(), b"file_")
         self._assert_error(deferred, protocol)
@@ -172,7 +198,7 @@ class ReadMultipartResponseTests(TestCase):
         self.assertFalse(deferred.called)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
 
         self._assert_error(deferred, protocol)
         self._cleanup_error(deferred)

From a51daffba5e58489f93f76a074aa7d6f73533226 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 14 Aug 2024 12:41:53 +0100
Subject: [PATCH 127/332] Reduce concurrent thread usage in media (#17567)

Follow on from #17558

Basically, we want to reduce the number of threads we use at a time,
i.e. reduce the number of threads that are paused/blocked. We do this
by returning from the thread when the consumer pauses the producer,
rather than pausing in the thread.

---------

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
 changelog.d/17567.misc        |  1 +
 synapse/media/_base.py        | 91 +++++++++++++++++++----------------
 synapse/util/async_helpers.py | 43 +++++++++++++++++
 3 files changed, 93 insertions(+), 42 deletions(-)
 create mode 100644 changelog.d/17567.misc

diff --git a/changelog.d/17567.misc b/changelog.d/17567.misc
new file mode 100644
index 0000000000..cfa8089a81
--- /dev/null
+++ b/changelog.d/17567.misc
@@ -0,0 +1 @@
+Speed up responding to media requests.
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 89dea39163..fdbbe29472 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -22,7 +22,6 @@
 
 import logging
 import os
-import threading
 import urllib
 from abc import ABC, abstractmethod
 from types import TracebackType
@@ -56,6 +55,7 @@ from synapse.logging.context import (
     run_in_background,
 )
 from synapse.util import Clock
+from synapse.util.async_helpers import DeferredEvent
 from synapse.util.stringutils import is_ascii
 
 if TYPE_CHECKING:
@@ -620,10 +620,13 @@ class ThreadedFileSender:
     A producer that sends the contents of a file to a consumer, reading from the
     file on a thread.
 
-    This works by spawning a loop in a threadpool that repeatedly reads from the
-    file and sends it to the consumer. The main thread communicates with the
-    loop via two `threading.Event`, which controls when to start/pause reading
-    and when to terminate.
+    This works by having a loop in a threadpool repeatedly reading from the
+    file, until the consumer pauses the producer. There is then a loop in the
+    main thread that waits until the consumer resumes the producer and then
+    starts reading in the threadpool again.
+
+    This is done to ensure that we're never waiting in the threadpool, as
+    otherwise it's easy to starve it of threads.
     """
 
     # How much data to read in one go.
@@ -643,12 +646,11 @@ class ThreadedFileSender: # Signals if the thread should keep reading/sending data. Set means # continue, clear means pause. - self.wakeup_event = threading.Event() + self.wakeup_event = DeferredEvent(self.reactor) # Signals if the thread should terminate, e.g. because the consumer has - # gone away. Both this and `wakeup_event` should be set to terminate the - # loop (otherwise the thread will block on `wakeup_event`). - self.stop_event = threading.Event() + # gone away. + self.stop_writing = False def beginFileTransfer( self, file: BinaryIO, consumer: interfaces.IConsumer @@ -663,12 +665,7 @@ class ThreadedFileSender: # We set the wakeup signal as we should start producing immediately. self.wakeup_event.set() - run_in_background( - defer_to_threadpool, - self.reactor, - self.thread_pool, - self._on_thread_read_loop, - ) + run_in_background(self.start_read_loop) return make_deferred_yieldable(self.deferred) @@ -686,42 +683,52 @@ class ThreadedFileSender: # Unregister the consumer so we don't try and interact with it again. self.consumer = None - # Terminate the thread loop. + # Terminate the loop. + self.stop_writing = True self.wakeup_event.set() - self.stop_event.set() if not self.deferred.called: self.deferred.errback(Exception("Consumer asked us to stop producing")) - def _on_thread_read_loop(self) -> None: - """This is the loop that happens on a thread.""" - + async def start_read_loop(self) -> None: + """This is the loop that drives reading/writing""" try: - while not self.stop_event.is_set(): - # We wait for the producer to signal that the consumer wants - # more data (or we should abort) + while not self.stop_writing: + # Start the loop in the threadpool to read data. + more_data = await defer_to_threadpool( + self.reactor, self.thread_pool, self._on_thread_read_loop + ) + if not more_data: + # Reached EOF, we can just return. + return + if not self.wakeup_event.is_set(): - ret = self.wakeup_event.wait(self.TIMEOUT_SECONDS) + ret = await self.wakeup_event.wait(self.TIMEOUT_SECONDS) if not ret: raise Exception("Timed out waiting to resume") - - # Check if we were woken up so that we abort the download - if self.stop_event.is_set(): - return - - # The file should always have been set before we get here. - assert self.file is not None - - chunk = self.file.read(self.CHUNK_SIZE) - if not chunk: - return - - self.reactor.callFromThread(self._write, chunk) - except Exception: - self.reactor.callFromThread(self._error, Failure()) + self._error(Failure()) finally: - self.reactor.callFromThread(self._finish) + self._finish() + + def _on_thread_read_loop(self) -> bool: + """This is the loop that happens on a thread. + + Returns: + Whether there is more data to send. + """ + + while not self.stop_writing and self.wakeup_event.is_set(): + # The file should always have been set before we get here. 
+ assert self.file is not None + + chunk = self.file.read(self.CHUNK_SIZE) + if not chunk: + return False + + self.reactor.callFromThread(self._write, chunk) + + return True def _write(self, chunk: bytes) -> None: """Called from the thread to write a chunk of data""" @@ -729,7 +736,7 @@ class ThreadedFileSender: self.consumer.write(chunk) def _error(self, failure: Failure) -> None: - """Called from the thread when there was a fatal error""" + """Called when there was a fatal error""" if self.consumer: self.consumer.unregisterProducer() self.consumer = None @@ -738,7 +745,7 @@ class ThreadedFileSender: self.deferred.errback(failure) def _finish(self) -> None: - """Called from the thread when it finishes (either on success or + """Called when we have finished writing (either on success or failure).""" if self.file: self.file.close() diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 70139beef2..8618bb0651 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -885,3 +885,46 @@ class AwakenableSleeper: # Cancel the sleep if we were woken up if call.active(): call.cancel() + + +class DeferredEvent: + """Like threading.Event but for async code""" + + def __init__(self, reactor: IReactorTime) -> None: + self._reactor = reactor + self._deferred: "defer.Deferred[None]" = defer.Deferred() + + def set(self) -> None: + if not self._deferred.called: + self._deferred.callback(None) + + def clear(self) -> None: + if self._deferred.called: + self._deferred = defer.Deferred() + + def is_set(self) -> bool: + return self._deferred.called + + async def wait(self, timeout_seconds: float) -> bool: + if self.is_set(): + return True + + # Create a deferred that gets called in N seconds + sleep_deferred: "defer.Deferred[None]" = defer.Deferred() + call = self._reactor.callLater(timeout_seconds, sleep_deferred.callback, None) + + try: + await make_deferred_yieldable( + defer.DeferredList( + [sleep_deferred, self._deferred], + fireOnOneCallback=True, + fireOnOneErrback=True, + consumeErrors=True, + ) + ) + finally: + # Cancel the sleep if we were woken up + if call.active(): + call.cancel() + + return self.is_set() From 1892ba5f67fa17685ff036f85939396e075bac4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Aug 2024 13:46:22 +0100 Subject: [PATCH 128/332] Fix 'Producer was not unregistered' error (#17569) Follows on from #17567 --- changelog.d/17569.misc | 1 + synapse/media/_base.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/17569.misc diff --git a/changelog.d/17569.misc b/changelog.d/17569.misc new file mode 100644 index 0000000000..cfa8089a81 --- /dev/null +++ b/changelog.d/17569.misc @@ -0,0 +1 @@ +Speed up responding to media requests. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index fdbbe29472..9341d4859e 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -681,6 +681,9 @@ class ThreadedFileSender: """interfaces.IPushProducer""" # Unregister the consumer so we don't try and interact with it again. + if self.consumer: + self.consumer.unregisterProducer() + self.consumer = None # Terminate the loop. 
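Together, the two patches above replace the old `threading.Event`-based signalling with a Deferred-backed event, so a paused producer parks on the reactor instead of occupying a pool thread. The toy driver below shows the new `DeferredEvent` helper in isolation; it assumes Synapse is importable, and the one-second `callLater` is a stand-in for a consumer calling `resumeProducing()`.

```python
from twisted.internet import defer, task

from synapse.util.async_helpers import DeferredEvent


def main(reactor):
    # A fresh event starts cleared, so wait() below will park on a Deferred.
    event = DeferredEvent(reactor)

    # Stand-in for the consumer resuming us a second later.
    reactor.callLater(1.0, event.set)

    async def wait_to_resume() -> None:
        # Returns True if we were woken, False if the 90s timeout hit.
        woken = await event.wait(timeout_seconds=90.0)
        print("resumed" if woken else "timed out waiting to resume")

    return defer.ensureDeferred(wait_to_resume())


task.react(main)
```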
From f77bfbfa30d5878d6d8001411bba037e034fa1de Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Wed, 14 Aug 2024 16:13:56 +0300 Subject: [PATCH 129/332] Fix fetching signing keys when `old_verify_keys` is omitted (#17568) `old_verify_keys` isn't marked as required in https://spec.matrix.org/v1.11/server-server-api/#get_matrixkeyv2server and there's no functional difference between an empty object and omitting the object, so I don't think there's any reason synapse should explode when the field is omitted. --- changelog.d/17568.bugfix | 1 + synapse/crypto/keyring.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17568.bugfix diff --git a/changelog.d/17568.bugfix b/changelog.d/17568.bugfix new file mode 100644 index 0000000000..71a1f12915 --- /dev/null +++ b/changelog.d/17568.bugfix @@ -0,0 +1 @@ +Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 8c301e077c..643d2d4e66 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -589,7 +589,7 @@ class BaseV2KeyFetcher(KeyFetcher): % (server_name,) ) - for key_id, key_data in response_json["old_verify_keys"].items(): + for key_id, key_data in response_json.get("old_verify_keys", {}).items(): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) From fae75b03765d4658427aa6affa031a270f8b5886 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 14 Aug 2024 15:11:22 +0100 Subject: [PATCH 130/332] Register the media threadpool with our metrics (#17566) --- changelog.d/17566.misc | 1 + synapse/server.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/17566.misc diff --git a/changelog.d/17566.misc b/changelog.d/17566.misc new file mode 100644 index 0000000000..7210753fa3 --- /dev/null +++ b/changelog.d/17566.misc @@ -0,0 +1 @@ +Speed up responding to media requests. \ No newline at end of file diff --git a/synapse/server.py b/synapse/server.py index 8b07bb39a0..d6c9cbdac0 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -124,6 +124,7 @@ from synapse.http.client import ( ) from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.media.media_repository import MediaRepository +from synapse.metrics import register_threadpool from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi from synapse.module_api.callbacks import ModuleApiCallbacks @@ -959,4 +960,7 @@ class HomeServer(metaclass=abc.ABCMeta): "during", "shutdown", media_threadpool.stop ) + # Register the threadpool with our metrics. + register_threadpool("media", media_threadpool) + return media_threadpool From 9ce489be5ed13148f49883239740e780d27157ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 16 Aug 2024 08:54:57 +0100 Subject: [PATCH 131/332] Add a flag to /versions about SSS support (#17571) So that clients can check for support. 
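For example, a client might probe for the flag with an ordinary authenticated request. This is a hedged sketch using the third-party `requests` library, with a placeholder homeserver URL and access token; the flag is advertised under the `unstable_features` key of the response.

```python
import requests

BASE_URL = "https://matrix.example.org"  # placeholder homeserver
ACCESS_TOKEN = "syt_placeholder_token"  # placeholder token


def supports_simplified_sliding_sync() -> bool:
    resp = requests.get(
        f"{BASE_URL}/_matrix/client/versions",
        # Authenticated, so per-user experimental enablement is reflected.
        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        timeout=10,
    )
    resp.raise_for_status()
    features = resp.json().get("unstable_features", {})
    return bool(features.get("org.matrix.simplified_msc3575", False))
```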
Note that if the feature is only enabled for some users, the `/versions` request must be authenticated to pick up that SSS is enabled for the user --- changelog.d/17571.misc | 1 + synapse/rest/client/versions.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/17571.misc diff --git a/changelog.d/17571.misc b/changelog.d/17571.misc new file mode 100644 index 0000000000..67182a4fcd --- /dev/null +++ b/changelog.d/17571.misc @@ -0,0 +1 @@ +Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 75df684416..874869dc2d 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -64,6 +64,7 @@ class VersionsRestServlet(RestServlet): async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: msc3881_enabled = self.config.experimental.msc3881_enabled + msc3575_enabled = self.config.experimental.msc3575_enabled if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req( @@ -77,6 +78,9 @@ class VersionsRestServlet(RestServlet): msc3881_enabled = await self.store.is_feature_enabled( user_id, ExperimentalFeature.MSC3881 ) + msc3575_enabled = await self.store.is_feature_enabled( + user_id, ExperimentalFeature.MSC3575 + ) return ( 200, @@ -169,6 +173,8 @@ class VersionsRestServlet(RestServlet): ), # MSC4151: Report room API (Client-Server API) "org.matrix.msc4151": self.config.experimental.msc4151_enabled, + # Simplified sliding sync + "org.matrix.simplified_msc3575": msc3575_enabled, }, }, ) From f162c92f2a1f8cf41b5f7211cb31cf8ae49b20e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 16 Aug 2024 16:04:02 +0100 Subject: [PATCH 132/332] Speed up `/keys/changes` (#17548) Follow on from #17537. This is just adding a batched lookup function (you might want to hide whitespace in the diff). --- changelog.d/17548.misc | 1 + synapse/handlers/device.py | 32 ++++------ .../storage/databases/main/state_deltas.py | 64 ++++++++++++++++++- 3 files changed, 77 insertions(+), 20 deletions(-) create mode 100644 changelog.d/17548.misc diff --git a/changelog.d/17548.misc b/changelog.d/17548.misc new file mode 100644 index 0000000000..861b241dcd --- /dev/null +++ b/changelog.d/17548.misc @@ -0,0 +1 @@ +Fix performance of device lists in `/key/changes` and sliding sync. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index ce26c91a7b..4f2a9f3a5b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -267,31 +267,27 @@ class DeviceWorkerHandler: newly_left_rooms.add(change.room_id) # We now work out if any other users have since joined or left the rooms - # the user is currently in. First we filter out rooms that we know - # haven't changed recently. - rooms_changed = self.store.get_rooms_that_changed( - joined_room_ids, from_token.room_key - ) + # the user is currently in. # List of membership changes per room room_to_deltas: Dict[str, List[StateDelta]] = {} # The set of event IDs of membership events (so we can fetch their # associated membership). memberships_to_fetch: Set[str] = set() - for room_id in rooms_changed: - # TODO: Only pull out membership events? 
- state_changes = await self.store.get_current_state_deltas_for_room( - room_id, from_token=from_token.room_key, to_token=now_token.room_key - ) - for delta in state_changes: - if delta.event_type != EventTypes.Member: - continue - room_to_deltas.setdefault(room_id, []).append(delta) - if delta.event_id: - memberships_to_fetch.add(delta.event_id) - if delta.prev_event_id: - memberships_to_fetch.add(delta.prev_event_id) + # TODO: Only pull out membership events? + state_changes = await self.store.get_current_state_deltas_for_rooms( + joined_room_ids, from_token=from_token.room_key, to_token=now_token.room_key + ) + for delta in state_changes: + if delta.event_type != EventTypes.Member: + continue + + room_to_deltas.setdefault(delta.room_id, []).append(delta) + if delta.event_id: + memberships_to_fetch.add(delta.event_id) + if delta.prev_event_id: + memberships_to_fetch.add(delta.prev_event_id) # Fetch all the memberships for the membership events event_id_to_memberships = await self.store.get_membership_from_event_ids( diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 7d491d1728..eaa13da368 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -26,10 +26,11 @@ import attr from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore -from synapse.storage.database import LoggingTransaction +from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause from synapse.storage.databases.main.stream import _filter_results_by_stream -from synapse.types import RoomStreamToken +from synapse.types import RoomStreamToken, StrCollection from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) @@ -200,3 +201,62 @@ class StateDeltasStore(SQLBaseStore): return await self.db_pool.runInteraction( "get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn ) + + @trace + async def get_current_state_deltas_for_rooms( + self, + room_ids: StrCollection, + from_token: RoomStreamToken, + to_token: RoomStreamToken, + ) -> List[StateDelta]: + """Get the state deltas between two tokens for the set of rooms.""" + + room_ids = self._curr_state_delta_stream_cache.get_entities_changed( + room_ids, from_token.stream + ) + if not room_ids: + return [] + + def get_current_state_deltas_for_rooms_txn( + txn: LoggingTransaction, + room_ids: StrCollection, + ) -> List[StateDelta]: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + + sql = f""" + SELECT instance_name, stream_id, room_id, type, state_key, event_id, prev_event_id + FROM current_state_delta_stream + WHERE {clause} AND ? < stream_id AND stream_id <= ? 
+ ORDER BY stream_id ASC + """ + args.append(from_token.stream) + args.append(to_token.get_max_stream_pos()) + + txn.execute(sql, args) + + return [ + StateDelta( + stream_id=row[1], + room_id=row[2], + event_type=row[3], + state_key=row[4], + event_id=row[5], + prev_event_id=row[6], + ) + for row in txn + if _filter_results_by_stream(from_token, to_token, row[0], row[1]) + ] + + results = [] + for batch in batch_iter(room_ids, 1000): + deltas = await self.db_pool.runInteraction( + "get_current_state_deltas_for_rooms", + get_current_state_deltas_for_rooms_txn, + batch, + ) + + results.extend(deltas) + + return results From a5d25bb623de3602b4d1d00b09e1d3e9ce60b4fc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 19 Aug 2024 14:15:36 +0100 Subject: [PATCH 133/332] Test github token before running release script (#17562) This stops people from getting half way through a step and it failing due to the github token having expired (this happens to me every damn time). --- changelog.d/17562.misc | 1 + scripts-dev/release.py | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 changelog.d/17562.misc diff --git a/changelog.d/17562.misc b/changelog.d/17562.misc new file mode 100644 index 0000000000..a267df8b83 --- /dev/null +++ b/changelog.d/17562.misc @@ -0,0 +1 @@ +Test github token before running release script steps. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 5e519bb758..1ace804682 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -324,6 +324,11 @@ def tag(gh_token: Optional[str]) -> None: def _tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + # Make sure we're in a git repo. repo = get_repo_and_check_clean_checkout() @@ -418,6 +423,11 @@ def publish(gh_token: str) -> None: def _publish(gh_token: str) -> None: """Publish release on GitHub.""" + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + # Make sure we're in a git repo. get_repo_and_check_clean_checkout() @@ -460,6 +470,11 @@ def upload(gh_token: Optional[str]) -> None: def _upload(gh_token: Optional[str]) -> None: """Upload release to pypi.""" + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + current_version = get_package_version() tag_name = f"v{current_version}" @@ -555,6 +570,11 @@ def wait_for_actions(gh_token: Optional[str]) -> None: def _wait_for_actions(gh_token: Optional[str]) -> None: + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + # Find out the version and tag name. current_version = get_package_version() tag_name = f"v{current_version}" @@ -711,6 +731,11 @@ Ask the designated people to do the blog and tweets.""" @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) def full(gh_token: str) -> None: + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + click.echo("1. If this is a security release, read the security wiki page.") click.echo("2. 
Check for any release blockers before proceeding.")
     click.echo("   https://github.com/element-hq/synapse/labels/X-Release-Blocker")

From 993644ded0d9d80fc0ef87781b5b784ad8212903 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 19 Aug 2024 14:15:36 +0100
Subject: [PATCH 134/332] Fix zero length media handling (#17570)

Results in:
```
AssertionError: null
  File "synapse/http/server.py", line 332, in _async_render_wrapper
    callback_return = await self._async_render(request)
  File "synapse/http/server.py", line 544, in _async_render
    callback_return = await raw_callback_return
  File "synapse/federation/transport/server/_base.py", line 369, in new_func
    response = await func(
  File "synapse/federation/transport/server/federation.py", line 826, in on_GET
    await self.media_repo.get_local_media(
  File "synapse/media/media_repository.py", line 473, in get_local_media
    await respond_with_multipart_responder(
  File "synapse/media/_base.py", line 353, in respond_with_multipart_responder
    assert content_length is not None
```

---
 changelog.d/17570.bugfix       | 1 +
 synapse/media/media_storage.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/17570.bugfix

diff --git a/changelog.d/17570.bugfix b/changelog.d/17570.bugfix
new file mode 100644
index 0000000000..e2964168b1
--- /dev/null
+++ b/changelog.d/17570.bugfix
@@ -0,0 +1 @@
+Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint.
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index cf4208eb71..c25d1a9ba3 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -544,7 +544,7 @@ class MultipartFileConsumer:
         Calculate the content length of the multipart response in bytes.
         """
 
-        if not self.length:
+        if self.length is None:
             return None
         # calculate length of json field and content-type, disposition headers
         json_field = json.dumps(self.json_field)

From 261e7462814871b7a122fcd5518afad82530a44c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 19 Aug 2024 20:09:41 +0100
Subject: [PATCH 135/332] Sliding sync: Add classes for per-connection state (#17574)

This is some prep work ahead of correctly tracking receipts, where we
will also want to track the room status in terms of the last receipt we
had sent down.

Essentially, we add two classes, `PerConnectionState` and a mutable
version, and then operate on those.

---------

Co-authored-by: Eric Eastwood
---
 changelog.d/17574.misc           |   1 +
 synapse/handlers/sliding_sync.py | 295 ++++++++++++++++++++-----------
 2 files changed, 196 insertions(+), 100 deletions(-)
 create mode 100644 changelog.d/17574.misc

diff --git a/changelog.d/17574.misc b/changelog.d/17574.misc
new file mode 100644
index 0000000000..71020abec4
--- /dev/null
+++ b/changelog.d/17574.misc
@@ -0,0 +1 @@
+Refactor per-connection state in experimental sliding sync handler.
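The diff below layers a mutable view over an immutable snapshot using `ChainMap`, so that updates made while assembling a response are recorded separately from the previously-persisted state. Here is a standalone sketch of that copy-on-write shape; the class and field names are illustrative stand-ins, not the ones added below.

```python
from collections import ChainMap
from typing import Mapping, MutableMapping, cast


class StatusMap:
    """Immutable snapshot of per-room statuses."""

    def __init__(self, statuses: Mapping[str, str]) -> None:
        self._statuses = statuses

    def get_mutable(self) -> "MutableStatusMap":
        return MutableStatusMap(self._statuses)

    def copy(self) -> "StatusMap":
        # Flattens a ChainMap back into a plain dict, giving a new snapshot.
        return StatusMap(dict(self._statuses))


class MutableStatusMap(StatusMap):
    def __init__(self, statuses: Mapping[str, str]) -> None:
        # Writes land in the fresh first dict; reads fall through to the old
        # snapshot, so the first dict is exactly "what changed this round".
        # The cast mirrors the trick in the diff below: ChainMap wants mutable
        # mappings, but we never actually mutate the underlying snapshot.
        self._statuses = ChainMap({}, cast(MutableMapping[str, str], statuses))

    def record(self, room_id: str, status: str) -> None:
        self._statuses[room_id] = status


prev = StatusMap({"!a:x": "live"})
mut = prev.get_mutable()
mut.record("!b:x", "previously")
new = mut.copy()  # contains both rooms; `prev` is untouched
```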
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 99510254f3..c615cc7c32 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -19,6 +19,8 @@ # import enum import logging +import typing +from collections import ChainMap from enum import Enum from itertools import chain from typing import ( @@ -30,11 +32,13 @@ from typing import ( List, Literal, Mapping, + MutableMapping, Optional, Sequence, Set, Tuple, Union, + cast, ) import attr @@ -571,21 +575,21 @@ class SlidingSyncHandler: # See https://github.com/matrix-org/matrix-doc/issues/1144 raise NotImplementedError() - if from_token: - # Check that we recognize the connection position, if not tell the - # clients that they need to start again. - # - # If we don't do this and the client asks for the full range of - # rooms, we end up sending down all rooms and their state from - # scratch (which can be very slow). By expiring the connection we - # allow the client a chance to do an initial request with a smaller - # range of rooms to get them some results sooner but will end up - # taking the same amount of time (more with round-trips and - # re-processing) in the end to get everything again. - if not await self.connection_store.is_valid_token( - sync_config, from_token.connection_position - ): - raise SlidingSyncUnknownPosition() + # Get the per-connection state (if any). + # + # Raises an exception if there is a `connection_position` that we don't + # recognize. If we don't do this and the client asks for the full range + # of rooms, we end up sending down all rooms and their state from + # scratch (which can be very slow). By expiring the connection we allow + # the client a chance to do an initial request with a smaller range of + # rooms to get them some results sooner but will end up taking the same + # amount of time (more with round-trips and re-processing) in the end to + # get everything again. + previous_connection_state = ( + await self.connection_store.get_per_connection_state( + sync_config, from_token + ) + ) await self.connection_store.mark_token_seen( sync_config=sync_config, @@ -781,11 +785,7 @@ class SlidingSyncHandler: # we haven't sent the room down, or we have but there are missing # updates). for room_id in relevant_room_map: - status = await self.connection_store.have_sent_room( - sync_config, - from_token.connection_position, - room_id, - ) + status = previous_connection_state.rooms.have_sent_room(room_id) if ( # The room was never sent down before so the client needs to know # about it regardless of any updates. @@ -821,6 +821,7 @@ class SlidingSyncHandler: async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( sync_config=sync_config, + per_connection_state=previous_connection_state, room_id=room_id, room_sync_config=relevant_rooms_to_send_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ @@ -853,6 +854,8 @@ class SlidingSyncHandler: ) if has_lists or has_room_subscriptions: + new_connection_state = previous_connection_state.get_mutable() + # We now calculate if any rooms outside the range have had updates, # which we are not sending down. 
# @@ -882,11 +885,18 @@ class SlidingSyncHandler: ) unsent_room_ids = list(missing_event_map_by_room) - connection_position = await self.connection_store.record_rooms( + new_connection_state.rooms.record_unsent_rooms( + unsent_room_ids, from_token.stream_token + ) + + new_connection_state.rooms.record_sent_rooms( + relevant_rooms_to_send_map.keys() + ) + + connection_position = await self.connection_store.record_new_state( sync_config=sync_config, from_token=from_token, - sent_room_ids=relevant_rooms_to_send_map.keys(), - unsent_room_ids=unsent_room_ids, + per_connection_state=new_connection_state, ) elif from_token: connection_position = from_token.connection_position @@ -1939,6 +1949,7 @@ class SlidingSyncHandler: async def get_room_sync_data( self, sync_config: SlidingSyncConfig, + per_connection_state: "PerConnectionState", room_id: str, room_sync_config: RoomSyncConfig, room_membership_for_user_at_to_token: _RoomMembershipForUser, @@ -1986,11 +1997,7 @@ class SlidingSyncHandler: from_bound = None initial = True if from_token and not room_membership_for_user_at_to_token.newly_joined: - room_status = await self.connection_store.have_sent_room( - sync_config=sync_config, - connection_token=from_token.connection_position, - room_id=room_id, - ) + room_status = per_connection_state.rooms.have_sent_room(room_id) if room_status.status == HaveSentRoomFlag.LIVE: from_bound = from_token.stream_token.room_key initial = False @@ -3034,6 +3041,121 @@ HAVE_SENT_ROOM_NEVER = HaveSentRoom(HaveSentRoomFlag.NEVER, None) HAVE_SENT_ROOM_LIVE = HaveSentRoom(HaveSentRoomFlag.LIVE, None) +@attr.s(auto_attribs=True, slots=True, frozen=True) +class RoomStatusMap: + """For a given stream, e.g. events, records what we have or have not sent + down for that stream in a given room.""" + + # `room_id` -> `HaveSentRoom` + _statuses: Mapping[str, HaveSentRoom] = attr.Factory(dict) + + def have_sent_room(self, room_id: str) -> HaveSentRoom: + """Return whether we have previously sent the room down""" + return self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + + def get_mutable(self) -> "MutableRoomStatusMap": + """Get a mutable copy of this state.""" + return MutableRoomStatusMap( + statuses=self._statuses, + ) + + def copy(self) -> "RoomStatusMap": + """Make a copy of the class. Useful for converting from a mutable to + immutable version.""" + + return RoomStatusMap(statuses=dict(self._statuses)) + + +class MutableRoomStatusMap(RoomStatusMap): + """A mutable version of `RoomStatusMap`""" + + # We use a ChainMap here so that we can easily track what has been updated + # and what hasn't. Note that when we persist the per connection state this + # will get flattened to a normal dict (via calling `.copy()`) + _statuses: typing.ChainMap[str, HaveSentRoom] + + def __init__( + self, + statuses: Mapping[str, HaveSentRoom], + ) -> None: + # ChainMap requires a mutable mapping, but we're not actually going to + # mutate it. 
+ statuses = cast(MutableMapping, statuses) + + super().__init__( + statuses=ChainMap({}, statuses), + ) + + def get_updates(self) -> Mapping[str, HaveSentRoom]: + """Return only the changes that were made""" + return self._statuses.maps[0] + + def record_sent_rooms(self, room_ids: StrCollection) -> None: + """Record that we have sent these rooms in the response""" + for room_id in room_ids: + current_status = self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + if current_status.status == HaveSentRoomFlag.LIVE: + continue + + self._statuses[room_id] = HAVE_SENT_ROOM_LIVE + + def record_unsent_rooms( + self, room_ids: StrCollection, from_token: StreamToken + ) -> None: + """Record that we have not sent these rooms in the response, but there + have been updates. + """ + # Whether we add/update the entries for unsent rooms depends on the + # existing entry: + # - LIVE: We have previously sent down everything up to + # `last_room_token, so we update the entry to be `PREVIOUSLY` with + # `last_room_token`. + # - PREVIOUSLY: We have previously sent down everything up to *a* + # given token, so we don't need to update the entry. + # - NEVER: We have never previously sent down the room, and we haven't + # sent anything down this time either so we leave it as NEVER. + + for room_id in room_ids: + current_status = self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + if current_status.status != HaveSentRoomFlag.LIVE: + continue + + self._statuses[room_id] = HaveSentRoom.previously(from_token.room_key) + + +@attr.s(auto_attribs=True) +class PerConnectionState: + """The per-connection state. A snapshot of what we've sent down the connection before. + + Currently, we track whether we've sent down various aspects of a given room before. + + We use the `rooms` field to store the position in the events stream for each room that we've previously sent to the client before. On the next request that includes the room, we can then send only what's changed since that recorded position. + + Same goes for the `receipts` field so we only need to send the new receipts since the last time you made a sync request. + + Attributes: + rooms: The status of each room for the events stream. + """ + + rooms: RoomStatusMap = attr.Factory(RoomStatusMap) + + def get_mutable(self) -> "MutablePerConnectionState": + """Get a mutable copy of this state.""" + return MutablePerConnectionState( + rooms=self.rooms.get_mutable(), + ) + + +@attr.s(auto_attribs=True) +class MutablePerConnectionState(PerConnectionState): + """A mutable version of `PerConnectionState`""" + + rooms: MutableRoomStatusMap + + def has_updates(self) -> bool: + return bool(self.rooms.get_updates()) + + @attr.s(auto_attribs=True) class SlidingSyncConnectionStore: """In-memory store of per-connection state, including what rooms we have @@ -3063,9 +3185,9 @@ class SlidingSyncConnectionStore: to mapping of room ID to `HaveSentRoom`. 
""" - # `(user_id, conn_id)` -> `token` -> `room_id` -> `HaveSentRoom` - _connections: Dict[Tuple[str, str], Dict[int, Dict[str, HaveSentRoom]]] = ( - attr.Factory(dict) + # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState` + _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory( + dict ) async def is_valid_token( @@ -3078,48 +3200,52 @@ class SlidingSyncConnectionStore: conn_key = self._get_connection_key(sync_config) return connection_token in self._connections.get(conn_key, {}) - async def have_sent_room( - self, sync_config: SlidingSyncConfig, connection_token: int, room_id: str - ) -> HaveSentRoom: - """For the given user_id/conn_id/token, return whether we have - previously sent the room down - """ - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) - room_status = sync_statuses.get(connection_token, {}).get( - room_id, HAVE_SENT_ROOM_NEVER - ) - - return room_status - - @trace - async def record_rooms( + async def get_per_connection_state( self, sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], - *, - sent_room_ids: StrCollection, - unsent_room_ids: StrCollection, - ) -> int: - """Record which rooms we have/haven't sent down in a new response + ) -> PerConnectionState: + """Fetch the per-connection state for the token. - Attributes: - sync_config - from_token: The since token from the request, if any - sent_room_ids: The set of room IDs that we have sent down as - part of this request (only needs to be ones we didn't - previously sent down). - unsent_room_ids: The set of room IDs that have had updates - since the `from_token`, but which were not included in - this request + Raises: + SlidingSyncUnknownPosition if the connection_token is unknown + """ + if from_token is None: + return PerConnectionState() + + connection_position = from_token.connection_position + if connection_position == 0: + # Initial sync (request without a `from_token`) starts at `0` so + # there is no existing per-connection state + return PerConnectionState() + + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.get(conn_key, {}) + connection_state = sync_statuses.get(connection_position) + + if connection_state is None: + raise SlidingSyncUnknownPosition() + + return connection_state + + @trace + async def record_new_state( + self, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken], + per_connection_state: MutablePerConnectionState, + ) -> int: + """Record updated per-connection state, returning the connection + position associated with the new state. + + If there are no changes to the state this may return the same token as + the existing per-connection state. """ prev_connection_token = 0 if from_token is not None: prev_connection_token = from_token.connection_position - # If there are no changes then this is a noop. - if not sent_room_ids and not unsent_room_ids: + if not per_connection_state.has_updates(): return prev_connection_token conn_key = self._get_connection_key(sync_config) @@ -3130,42 +3256,11 @@ class SlidingSyncConnectionStore: new_store_token = prev_connection_token + 1 sync_statuses.pop(new_store_token, None) - # Copy over and update the room mappings. - new_room_statuses = dict(sync_statuses.get(prev_connection_token, {})) - - # Whether we have updated the `new_room_statuses`, if we don't by the - # end we can treat this as a noop. 
- have_updated = False - for room_id in sent_room_ids: - new_room_statuses[room_id] = HAVE_SENT_ROOM_LIVE - have_updated = True - - # Whether we add/update the entries for unsent rooms depends on the - # existing entry: - # - LIVE: We have previously sent down everything up to - # `last_room_token, so we update the entry to be `PREVIOUSLY` with - # `last_room_token`. - # - PREVIOUSLY: We have previously sent down everything up to *a* - # given token, so we don't need to update the entry. - # - NEVER: We have never previously sent down the room, and we haven't - # sent anything down this time either so we leave it as NEVER. - - # Work out the new state for unsent rooms that were `LIVE`. - if from_token: - new_unsent_state = HaveSentRoom.previously(from_token.stream_token.room_key) - else: - new_unsent_state = HAVE_SENT_ROOM_NEVER - - for room_id in unsent_room_ids: - prev_state = new_room_statuses.get(room_id) - if prev_state is not None and prev_state.status == HaveSentRoomFlag.LIVE: - new_room_statuses[room_id] = new_unsent_state - have_updated = True - - if not have_updated: - return prev_connection_token - - sync_statuses[new_store_token] = new_room_statuses + # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s + # don't grow forever. + sync_statuses[new_store_token] = PerConnectionState( + rooms=per_connection_state.rooms.copy(), + ) return new_store_token From 8b8d74d12f29a9b58c367715d7b8234e39a5eb1a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 19 Aug 2024 21:16:07 +0100 Subject: [PATCH 136/332] Sliding sync: Correctly track which read receipts we have or have not sent down. (#17575) Add connection tracking to the receipts extension. Based on #17574 --------- Co-authored-by: Eric Eastwood --- changelog.d/17575.misc | 1 + synapse/handlers/sliding_sync.py | 283 +++++++++++++----- synapse/storage/databases/main/receipts.py | 42 +++ .../sliding_sync/test_extension_receipts.py | 105 +++++++ .../client/sliding_sync/test_extensions.py | 13 +- tests/rest/client/utils.py | 14 +- 6 files changed, 378 insertions(+), 80 deletions(-) create mode 100644 changelog.d/17575.misc diff --git a/changelog.d/17575.misc b/changelog.d/17575.misc new file mode 100644 index 0000000000..1b4a53ee17 --- /dev/null +++ b/changelog.d/17575.misc @@ -0,0 +1 @@ +Correctly track read receipts that should be sent down in experimental sliding sync. 
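The heart of the diff below is a three-way split of the rooms we might send receipts for, keyed off a per-room `NEVER`/`PREVIOUSLY`/`LIVE` flag. Here is a hedged sketch of that decision logic, using plain `int`s as stand-in stream tokens and a hypothetical `bucket_rooms` helper rather than the real Synapse code:

```python
from enum import Enum, auto
from typing import Dict, Optional, Set, Tuple


class Flag(Enum):
    NEVER = auto()       # never sent this room's receipts before
    PREVIOUSLY = auto()  # sent up to some token, then the room fell out of range
    LIVE = auto()        # fully up to date as of the last request


def bucket_rooms(
    relevant_room_ids: Set[str],
    statuses: Dict[str, Tuple[Flag, Optional[int]]],
    from_token: Optional[int],
) -> Tuple[Set[str], Dict[str, int], Set[str]]:
    """Split rooms into (live, previously, initial) fetch strategies."""
    live: Set[str] = set()
    previously: Dict[str, int] = {}
    initial: Set[str] = set()
    for room_id in relevant_room_ids:
        if from_token is None:
            initial.add(room_id)  # initial sync: fetch from scratch
            continue
        flag, last_token = statuses.get(room_id, (Flag.NEVER, None))
        if flag is Flag.LIVE:
            live.add(room_id)  # fetch receipts since the request's from_token
        elif flag is Flag.PREVIOUSLY:
            assert last_token is not None
            previously[room_id] = last_token  # fetch since the room's own token
        else:
            initial.add(room_id)  # only receipts for timeline events
    return live, previously, initial


live, prev, init = bucket_rooms(
    {"!a:x", "!b:x", "!c:x"},
    {"!a:x": (Flag.LIVE, None), "!b:x": (Flag.PREVIOUSLY, 7)},
    from_token=10,
)
assert live == {"!a:x"} and prev == {"!b:x": 7} and init == {"!c:x"}
```

Each bucket then gets its own fetch strategy: `live` rooms fetch from the request's `from_token`, `previously` rooms fetch from their own recorded token, and `initial` rooms only receive receipts for events in the returned timeline.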
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index c615cc7c32..64b5acbe98 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -29,6 +29,7 @@ from typing import ( Callable, Dict, Final, + Generic, List, Literal, Mapping, @@ -37,6 +38,7 @@ from typing import ( Sequence, Set, Tuple, + TypeVar, Union, cast, ) @@ -55,6 +57,7 @@ from synapse.api.constants import ( from synapse.api.errors import SlidingSyncUnknownPosition from synapse.events import EventBase, StrippedStateEvent from synapse.events.utils import parse_stripped_state_event, strip_event +from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.relations import BundledAggregations from synapse.logging.opentracing import ( SynapseTags, @@ -821,7 +824,7 @@ class SlidingSyncHandler: async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( sync_config=sync_config, - per_connection_state=previous_connection_state, + previous_connection_state=previous_connection_state, room_id=room_id, room_sync_config=relevant_rooms_to_send_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ @@ -839,9 +842,13 @@ class SlidingSyncHandler: with start_active_span("sliding_sync.generate_room_entries"): await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10) + new_connection_state = previous_connection_state.get_mutable() + extensions = await self.get_extensions_response( sync_config=sync_config, actual_lists=lists, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, # We're purposely using `relevant_room_map` instead of # `relevant_rooms_to_send_map` here. This needs to be all room_ids we could # send regardless of whether they have an event update or not. The @@ -854,8 +861,6 @@ class SlidingSyncHandler: ) if has_lists or has_room_subscriptions: - new_connection_state = previous_connection_state.get_mutable() - # We now calculate if any rooms outside the range have had updates, # which we are not sending down. 
             #
@@ -886,7 +891,7 @@ class SlidingSyncHandler:
             )
             unsent_room_ids = list(missing_event_map_by_room)
 
             new_connection_state.rooms.record_unsent_rooms(
-                unsent_room_ids, from_token.stream_token
+                unsent_room_ids, from_token.stream_token.room_key
             )
 
@@ -896,7 +901,7 @@ class SlidingSyncHandler:
             connection_position = await self.connection_store.record_new_state(
                 sync_config=sync_config,
                 from_token=from_token,
-                per_connection_state=new_connection_state,
+                new_connection_state=new_connection_state,
             )
         elif from_token:
             connection_position = from_token.connection_position
@@ -1949,7 +1954,7 @@ class SlidingSyncHandler:
     async def get_room_sync_data(
         self,
         sync_config: SlidingSyncConfig,
-        per_connection_state: "PerConnectionState",
+        previous_connection_state: "PerConnectionState",
         room_id: str,
         room_sync_config: RoomSyncConfig,
         room_membership_for_user_at_to_token: _RoomMembershipForUser,
@@ -1997,7 +2002,7 @@ class SlidingSyncHandler:
         from_bound = None
         initial = True
         if from_token and not room_membership_for_user_at_to_token.newly_joined:
-            room_status = per_connection_state.rooms.have_sent_room(room_id)
+            room_status = previous_connection_state.rooms.have_sent_room(room_id)
             if room_status.status == HaveSentRoomFlag.LIVE:
                 from_bound = from_token.stream_token.room_key
                 initial = False
@@ -2476,6 +2481,8 @@ class SlidingSyncHandler:
     async def get_extensions_response(
         self,
         sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
         actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
         actual_room_ids: Set[str],
         actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
@@ -2486,6 +2493,9 @@ class SlidingSyncHandler:
 
         Args:
             sync_config: Sync configuration
+            previous_connection_state: Snapshot of the current per-connection state
+            new_connection_state: A mutable copy of the per-connection
+                state, used to record updates to the state during this request.
             actual_lists: Sliding window API. A map of list key to list results in the
                 Sliding Sync response.
             actual_room_ids: The actual room IDs in the Sliding Sync response.
@@ -2530,6 +2540,8 @@ class SlidingSyncHandler:
         if sync_config.extensions.receipts is not None:
             receipts_response = await self.get_receipts_extension_response(
                 sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
                 actual_lists=actual_lists,
                 actual_room_ids=actual_room_ids,
                 actual_room_response_map=actual_room_response_map,
@@ -2849,6 +2861,8 @@ class SlidingSyncHandler:
     async def get_receipts_extension_response(
         self,
         sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
         actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
         actual_room_ids: Set[str],
         actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
@@ -2860,6 +2874,9 @@ class SlidingSyncHandler:
 
         Args:
             sync_config: Sync configuration
+            previous_connection_state: The current per-connection state
+            new_connection_state: A mutable copy of the per-connection
+                state, used to record updates to the state.
             actual_lists: Sliding window API. A map of list key to list results in the
                 Sliding Sync response.
             actual_room_ids: The actual room IDs in the Sliding Sync response.
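The hunk that follows implements the fetch strategies themselves. For `initial` rooms, receipt EDUs are trimmed so they only reference events that actually appear in the returned timeline chunk; roughly like this (a simplified sketch with stand-in types, not the exact Synapse helper):

```python
from typing import Any, Dict, Iterable, List, Set

JsonDict = Dict[str, Any]


def filter_receipts_to_timeline(
    receipts: Iterable[JsonDict], relevant_event_ids: Set[str]
) -> List[JsonDict]:
    """Keep only receipt entries that point at events in the timeline chunk."""
    filtered: List[JsonDict] = []
    for receipt in receipts:
        content = {
            event_id: value
            for event_id, value in receipt["content"].items()
            if event_id in relevant_event_ids
        }
        # Receipts whose content is emptied out are dropped entirely.
        if content:
            filtered.append(
                {
                    "type": receipt["type"],
                    "room_id": receipt["room_id"],
                    "content": content,
                }
            )
    return filtered


edu = {
    "type": "m.receipt",
    "room_id": "!a:example.org",
    "content": {
        "$in_timeline": {"m.read": {"@u:example.org": {"ts": 1}}},
        "$old": {},
    },
}
assert filter_receipts_to_timeline([edu], {"$in_timeline"})[0]["content"] == {
    "$in_timeline": {"m.read": {"@u:example.org": {"ts": 1}}}
}
```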
@@ -2882,50 +2899,145 @@ class SlidingSyncHandler: room_id_to_receipt_map: Dict[str, JsonMapping] = {} if len(relevant_room_ids) > 0: - # TODO: Take connection tracking into account so that when a room comes back - # into range we can send the receipts that were missed. - receipt_source = self.event_sources.sources.receipt - receipts, _ = await receipt_source.get_new_events( - user=sync_config.user, - from_key=( - from_token.stream_token.receipt_key - if from_token - else MultiWriterStreamToken(stream=0) - ), - to_key=to_token.receipt_key, - # This is a dummy value and isn't used in the function - limit=0, - room_ids=relevant_room_ids, - is_guest=False, + # We need to handle the different cases depending on if we have sent + # down receipts previously or not, so we split the relevant rooms + # up into different collections based on status. + live_rooms = set() + previously_rooms: Dict[str, MultiWriterStreamToken] = {} + initial_rooms = set() + + for room_id in relevant_room_ids: + if not from_token: + initial_rooms.add(room_id) + continue + + # If we're sending down the room from scratch again for some reason, we + # should always resend the receipts as well (regardless of if + # we've sent them down before). This is to mimic the behaviour + # of what happens on initial sync, where you get a chunk of + # timeline with all of the corresponding receipts for the events in the timeline. + room_result = actual_room_response_map.get(room_id) + if room_result is not None and room_result.initial: + initial_rooms.add(room_id) + continue + + room_status = previous_connection_state.receipts.have_sent_room(room_id) + if room_status.status == HaveSentRoomFlag.LIVE: + live_rooms.add(room_id) + elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: + assert room_status.last_token is not None + previously_rooms[room_id] = room_status.last_token + elif room_status.status == HaveSentRoomFlag.NEVER: + initial_rooms.add(room_id) + else: + assert_never(room_status.status) + + # The set of receipts that we fetched. Private receipts need to be + # filtered out before returning. + fetched_receipts = [] + + # For live rooms we just fetch all receipts in those rooms since the + # `since` token. + if live_rooms: + assert from_token is not None + receipts = await self.store.get_linearized_receipts_for_rooms( + room_ids=live_rooms, + from_key=from_token.stream_token.receipt_key, + to_key=to_token.receipt_key, + ) + fetched_receipts.extend(receipts) + + # For rooms we've previously sent down, but aren't up to date, we + # need to use the from token from the room status. + if previously_rooms: + for room_id, receipt_token in previously_rooms.items(): + # TODO: Limit the number of receipts we're about to send down + # for the room, if its too many we should TODO + previously_receipts = ( + await self.store.get_linearized_receipts_for_room( + room_id=room_id, + from_key=receipt_token, + to_key=to_token.receipt_key, + ) + ) + fetched_receipts.extend(previously_receipts) + + # For rooms we haven't previously sent down, we could send all receipts + # from that room but we only want to include receipts for events + # in the timeline to avoid bloating and blowing up the sync response + # as the number of users in the room increases. 
(this behavior is part of the spec) + for room_id in initial_rooms: + room_result = actual_room_response_map.get(room_id) + if room_result is None: + continue + + relevant_event_ids = [ + event.event_id for event in room_result.timeline_events + ] + + # TODO: In the future, it would be good to fetch less receipts + # out of the database in the first place but we would need to + # add a new `event_id` index to `receipts_linearized`. + initial_receipts = await self.store.get_linearized_receipts_for_room( + room_id=room_id, + to_key=to_token.receipt_key, + ) + + for receipt in initial_receipts: + content = { + event_id: content_value + for event_id, content_value in receipt["content"].items() + if event_id in relevant_event_ids + } + if content: + fetched_receipts.append( + { + "type": receipt["type"], + "room_id": receipt["room_id"], + "content": content, + } + ) + + fetched_receipts = ReceiptEventSource.filter_out_private_receipts( + fetched_receipts, sync_config.user.to_string() ) - for receipt in receipts: + for receipt in fetched_receipts: # These fields should exist for every receipt room_id = receipt["room_id"] type = receipt["type"] content = receipt["content"] - # For `inital: True` rooms, we only want to include receipts for events - # in the timeline. - room_result = actual_room_response_map.get(room_id) - if room_result is not None: - if room_result.initial: - # TODO: In the future, it would be good to fetch less receipts - # out of the database in the first place but we would need to - # add a new `event_id` index to `receipts_linearized`. - relevant_event_ids = [ - event.event_id for event in room_result.timeline_events - ] - - assert isinstance(content, dict) - content = { - event_id: content_value - for event_id, content_value in content.items() - if event_id in relevant_event_ids - } - room_id_to_receipt_map[room_id] = {"type": type, "content": content} + # Now we update the per-connection state to track which receipts we have + # and haven't sent down. + new_connection_state.receipts.record_sent_rooms(relevant_room_ids) + + if from_token: + # Now find the set of rooms that may have receipts that we're not sending + # down. We only need to check rooms that we have previously returned + # receipts for (in `previous_connection_state`) because we only care about + # updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just + # stay pointing at their previous position so we don't need to waste time + # checking those and since we default to `NEVER`, rooms that were `NEVER` + # sent before don't need to be recorded as we'll handle them correctly when + # they come into range for the first time. + rooms_no_receipts = [ + room_id + for room_id, room_status in previous_connection_state.receipts._statuses.items() + if room_status.status == HaveSentRoomFlag.LIVE + and room_id not in relevant_room_ids + ] + changed_rooms = await self.store.get_rooms_with_receipts_between( + rooms_no_receipts, + from_key=from_token.stream_token.receipt_key, + to_key=to_token.receipt_key, + ) + new_connection_state.receipts.record_unsent_rooms( + changed_rooms, from_token.stream_token.receipt_key + ) + return SlidingSyncResult.Extensions.ReceiptsExtension( room_id_to_receipt_map=room_id_to_receipt_map, ) @@ -3016,9 +3128,15 @@ class HaveSentRoomFlag(Enum): LIVE = 3 +T = TypeVar("T") + + @attr.s(auto_attribs=True, slots=True, frozen=True) -class HaveSentRoom: - """Whether we have sent the room down a sliding sync connection. 
+class HaveSentRoom(Generic[T]): + """Whether we have sent the room data down a sliding sync connection. + + We are generic over the type of token used, e.g. `RoomStreamToken` or + `MultiWriterStreamToken`. Attributes: status: Flag of if we have or haven't sent down the room @@ -3029,54 +3147,58 @@ class HaveSentRoom: """ status: HaveSentRoomFlag - last_token: Optional[RoomStreamToken] + last_token: Optional[T] @staticmethod - def previously(last_token: RoomStreamToken) -> "HaveSentRoom": + def live() -> "HaveSentRoom[T]": + return HaveSentRoom(HaveSentRoomFlag.LIVE, None) + + @staticmethod + def previously(last_token: T) -> "HaveSentRoom[T]": """Constructor for `PREVIOUSLY` flag.""" return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token) - -HAVE_SENT_ROOM_NEVER = HaveSentRoom(HaveSentRoomFlag.NEVER, None) -HAVE_SENT_ROOM_LIVE = HaveSentRoom(HaveSentRoomFlag.LIVE, None) + @staticmethod + def never() -> "HaveSentRoom[T]": + return HaveSentRoom(HaveSentRoomFlag.NEVER, None) @attr.s(auto_attribs=True, slots=True, frozen=True) -class RoomStatusMap: +class RoomStatusMap(Generic[T]): """For a given stream, e.g. events, records what we have or have not sent down for that stream in a given room.""" # `room_id` -> `HaveSentRoom` - _statuses: Mapping[str, HaveSentRoom] = attr.Factory(dict) + _statuses: Mapping[str, HaveSentRoom[T]] = attr.Factory(dict) - def have_sent_room(self, room_id: str) -> HaveSentRoom: + def have_sent_room(self, room_id: str) -> HaveSentRoom[T]: """Return whether we have previously sent the room down""" - return self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + return self._statuses.get(room_id, HaveSentRoom.never()) - def get_mutable(self) -> "MutableRoomStatusMap": + def get_mutable(self) -> "MutableRoomStatusMap[T]": """Get a mutable copy of this state.""" return MutableRoomStatusMap( statuses=self._statuses, ) - def copy(self) -> "RoomStatusMap": + def copy(self) -> "RoomStatusMap[T]": """Make a copy of the class. Useful for converting from a mutable to immutable version.""" return RoomStatusMap(statuses=dict(self._statuses)) -class MutableRoomStatusMap(RoomStatusMap): +class MutableRoomStatusMap(RoomStatusMap[T]): """A mutable version of `RoomStatusMap`""" # We use a ChainMap here so that we can easily track what has been updated # and what hasn't. Note that when we persist the per connection state this # will get flattened to a normal dict (via calling `.copy()`) - _statuses: typing.ChainMap[str, HaveSentRoom] + _statuses: typing.ChainMap[str, HaveSentRoom[T]] def __init__( self, - statuses: Mapping[str, HaveSentRoom], + statuses: Mapping[str, HaveSentRoom[T]], ) -> None: # ChainMap requires a mutable mapping, but we're not actually going to # mutate it. 
@@ -3086,22 +3208,20 @@ class MutableRoomStatusMap(RoomStatusMap): statuses=ChainMap({}, statuses), ) - def get_updates(self) -> Mapping[str, HaveSentRoom]: + def get_updates(self) -> Mapping[str, HaveSentRoom[T]]: """Return only the changes that were made""" return self._statuses.maps[0] def record_sent_rooms(self, room_ids: StrCollection) -> None: """Record that we have sent these rooms in the response""" for room_id in room_ids: - current_status = self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + current_status = self._statuses.get(room_id, HaveSentRoom.never()) if current_status.status == HaveSentRoomFlag.LIVE: continue - self._statuses[room_id] = HAVE_SENT_ROOM_LIVE + self._statuses[room_id] = HaveSentRoom.live() - def record_unsent_rooms( - self, room_ids: StrCollection, from_token: StreamToken - ) -> None: + def record_unsent_rooms(self, room_ids: StrCollection, from_token: T) -> None: """Record that we have not sent these rooms in the response, but there have been updates. """ @@ -3116,33 +3236,42 @@ class MutableRoomStatusMap(RoomStatusMap): # sent anything down this time either so we leave it as NEVER. for room_id in room_ids: - current_status = self._statuses.get(room_id, HAVE_SENT_ROOM_NEVER) + current_status = self._statuses.get(room_id, HaveSentRoom.never()) if current_status.status != HaveSentRoomFlag.LIVE: continue - self._statuses[room_id] = HaveSentRoom.previously(from_token.room_key) + self._statuses[room_id] = HaveSentRoom.previously(from_token) @attr.s(auto_attribs=True) class PerConnectionState: - """The per-connection state. A snapshot of what we've sent down the connection before. + """The per-connection state. A snapshot of what we've sent down the + connection before. - Currently, we track whether we've sent down various aspects of a given room before. + Currently, we track whether we've sent down various aspects of a given room + before. - We use the `rooms` field to store the position in the events stream for each room that we've previously sent to the client before. On the next request that includes the room, we can then send only what's changed since that recorded position. + We use the `rooms` field to store the position in the events stream for each + room that we've previously sent to the client before. On the next request + that includes the room, we can then send only what's changed since that + recorded position. - Same goes for the `receipts` field so we only need to send the new receipts since the last time you made a sync request. + Same goes for the `receipts` field so we only need to send the new receipts + since the last time you made a sync request. Attributes: rooms: The status of each room for the events stream. + receipts: The status of each room for the receipts stream. 
""" - rooms: RoomStatusMap = attr.Factory(RoomStatusMap) + rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) + receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) def get_mutable(self) -> "MutablePerConnectionState": """Get a mutable copy of this state.""" return MutablePerConnectionState( rooms=self.rooms.get_mutable(), + receipts=self.receipts.get_mutable(), ) @@ -3150,10 +3279,11 @@ class PerConnectionState: class MutablePerConnectionState(PerConnectionState): """A mutable version of `PerConnectionState`""" - rooms: MutableRoomStatusMap + rooms: MutableRoomStatusMap[RoomStreamToken] + receipts: MutableRoomStatusMap[MultiWriterStreamToken] def has_updates(self) -> bool: - return bool(self.rooms.get_updates()) + return bool(self.rooms.get_updates()) or bool(self.receipts.get_updates()) @attr.s(auto_attribs=True) @@ -3233,7 +3363,7 @@ class SlidingSyncConnectionStore: self, sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], - per_connection_state: MutablePerConnectionState, + new_connection_state: MutablePerConnectionState, ) -> int: """Record updated per-connection state, returning the connection position associated with the new state. @@ -3245,7 +3375,7 @@ class SlidingSyncConnectionStore: if from_token is not None: prev_connection_token = from_token.connection_position - if not per_connection_state.has_updates(): + if not new_connection_state.has_updates(): return prev_connection_token conn_key = self._get_connection_key(sync_config) @@ -3259,7 +3389,8 @@ class SlidingSyncConnectionStore: # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s # don't grow forever. sync_statuses[new_store_token] = PerConnectionState( - rooms=per_connection_state.rooms.copy(), + rooms=new_connection_state.rooms.copy(), + receipts=new_connection_state.receipts.copy(), ) return new_store_token diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3bde0ae0d4..e266cc2a20 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -51,10 +51,12 @@ from synapse.types import ( JsonMapping, MultiWriterStreamToken, PersistedPosition, + StrCollection, ) from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -550,6 +552,46 @@ class ReceiptsWorkerStore(SQLBaseStore): return results + async def get_rooms_with_receipts_between( + self, + room_ids: StrCollection, + from_key: MultiWriterStreamToken, + to_key: MultiWriterStreamToken, + ) -> StrCollection: + """Given a set of room_ids, find out which ones (may) have receipts + between the two tokens (> `from_token` and <= `to_token`).""" + + room_ids = self._receipts_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) + if not room_ids: + return [] + + def f(txn: LoggingTransaction, room_ids: StrCollection) -> StrCollection: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + + sql = f""" + SELECT DISTINCT room_id FROM receipts_linearized + WHERE {clause} AND ? < stream_id AND stream_id <= ? 
+ """ + args.append(from_key.stream) + args.append(to_key.get_max_stream_pos()) + + txn.execute(sql, args) + + return [room_id for room_id, in txn] + + results: List[str] = [] + for batch in batch_iter(room_ids, 1000): + batch_result = await self.db_pool.runInteraction( + "get_rooms_with_receipts_between", f, batch + ) + results.extend(batch_result) + + return results + async def get_users_sent_receipts_between( self, last_id: int, current_id: int ) -> List[str]: diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py index 65fbac260e..39c51b367c 100644 --- a/tests/rest/client/sliding_sync/test_extension_receipts.py +++ b/tests/rest/client/sliding_sync/test_extension_receipts.py @@ -677,3 +677,108 @@ class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): set(), exact=True, ) + + def test_receipts_incremental_sync_out_of_range(self) -> None: + """Tests that we don't return read receipts for rooms that fall out of + range, but then do send all read receipts once they're back in range. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + + # Send a message and read receipt into room2 + event_response = self.helper.send(room_id2, body="new event", tok=user2_tok) + room2_event_id = event_response["event_id"] + + self.helper.send_read_receipt(room_id2, room2_event_id, tok=user1_tok) + + # Now send a message into room1 so that it is at the top of the list + self.helper.send(room_id1, body="new event", tok=user2_tok) + + # Make a SS request for only the top room. + sync_body = { + "lists": { + "main": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 5, + } + }, + "extensions": { + "receipts": { + "enabled": True, + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # The receipt is in room2, but only room1 is returned, so we don't + # expect to get the receipt. + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) + + # Move room2 into range. + self.helper.send(room_id2, body="new event", tok=user2_tok) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We expect to see the read receipt of room2, as that has the most + # recent update. + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + receipt = response_body["extensions"]["receipts"]["rooms"][room_id2] + self.assertIncludes( + receipt["content"][room2_event_id][ReceiptTypes.READ].keys(), + {user1_id}, + exact=True, + ) + + # Send a message into room1 to bump it to the top, but also send a + # receipt in room2 + self.helper.send(room_id1, body="new event", tok=user2_tok) + self.helper.send_read_receipt(room_id2, room2_event_id, tok=user2_tok) + + # We don't expect to see the new read receipt. 
+ response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) + + # But if we send a new message into room2, we expect to get the missing receipts + self.helper.send(room_id2, body="new event", tok=user2_tok) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + + # We should only see the new receipt + receipt = response_body["extensions"]["receipts"]["rooms"][room_id2] + self.assertIncludes( + receipt["content"][room2_event_id][ReceiptTypes.READ].keys(), + {user2_id}, + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py index 68f6661334..ae823d5415 100644 --- a/tests/rest/client/sliding_sync/test_extensions.py +++ b/tests/rest/client/sliding_sync/test_extensions.py @@ -120,19 +120,26 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase): "foo-list": { "ranges": [[0, 1]], "required_state": [], - "timeline_limit": 0, + # We set this to `1` because we're testing `receipts` which + # interact with the `timeline`. With receipts, when a room + # hasn't been sent down the connection before or it appears + # as `initial: true`, we only include receipts for events in + # the timeline to avoid bloating and blowing up the sync + # response as the number of users in the room increases. + # (this behavior is part of the spec) + "timeline_limit": 1, }, # We expect this list range to include room5, room4, room3 "bar-list": { "ranges": [[0, 2]], "required_state": [], - "timeline_limit": 0, + "timeline_limit": 1, }, }, "room_subscriptions": { room_id1: { "required_state": [], - "timeline_limit": 0, + "timeline_limit": 1, } }, } diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index e43140720d..9614cdd66a 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -45,7 +45,7 @@ from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactorClock from twisted.web.server import Site -from synapse.api.constants import Membership +from synapse.api.constants import Membership, ReceiptTypes from synapse.api.errors import Codes from synapse.server import HomeServer from synapse.types import JsonDict @@ -944,3 +944,15 @@ class RestHelper: assert len(p.links) == 1, "not exactly one link in confirmation page" oauth_uri = p.links[0] return oauth_uri + + def send_read_receipt(self, room_id: str, event_id: str, *, tok: str) -> None: + """Send a read receipt into the room at the given event""" + channel = make_request( + self.reactor, + self.site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + content={}, + access_token=tok, + ) + assert channel.code == HTTPStatus.OK, channel.text_body From 950ba844f7f0655bfea800d744aada690b9384ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2024 10:13:26 +0100 Subject: [PATCH 137/332] Sliding Sync: Batch up fetching receipts (#17589) This is to make initial sliding sync a bit faster --- changelog.d/17589.misc | 1 + synapse/handlers/sliding_sync.py | 30 ++++++++++++++++-------------- 2 files changed, 17 insertions(+), 14 deletions(-) create mode 100644 changelog.d/17589.misc diff --git a/changelog.d/17589.misc b/changelog.d/17589.misc new file mode 100644 index 0000000000..1b4a53ee17 --- 
/dev/null +++ b/changelog.d/17589.misc @@ -0,0 +1 @@ +Correctly track read receipts that should be sent down in experimental sliding sync. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 64b5acbe98..c6834a1036 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -2858,6 +2858,7 @@ class SlidingSyncHandler: account_data_by_room_map=account_data_by_room_map, ) + @trace async def get_receipts_extension_response( self, sync_config: SlidingSyncConfig, @@ -2966,24 +2967,25 @@ class SlidingSyncHandler: # from that room but we only want to include receipts for events # in the timeline to avoid bloating and blowing up the sync response # as the number of users in the room increases. (this behavior is part of the spec) - for room_id in initial_rooms: - room_result = actual_room_response_map.get(room_id) - if room_result is None: - continue - - relevant_event_ids = [ - event.event_id for event in room_result.timeline_events - ] - - # TODO: In the future, it would be good to fetch less receipts - # out of the database in the first place but we would need to - # add a new `event_id` index to `receipts_linearized`. - initial_receipts = await self.store.get_linearized_receipts_for_room( - room_id=room_id, + initial_rooms = { + room_id + for room_id in initial_rooms + if room_id in actual_room_response_map + } + if initial_rooms: + initial_receipts = await self.store.get_linearized_receipts_for_rooms( + room_ids=initial_rooms, to_key=to_token.receipt_key, ) for receipt in initial_receipts: + relevant_event_ids = { + event.event_id + for event in actual_room_response_map[ + receipt["room_id"] + ].timeline_events + } + content = { event_id: content_value for event_id, content_value in receipt["content"].items() From 6eb98a4f1cbc1707ebcb1b8c39ac464d31d47609 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2024 10:31:25 +0100 Subject: [PATCH 138/332] Sliding Sync: Handle timeline limit changes (take 2) (#17579) This supersedes #17503, given the per-connection state is being heavily rewritten it felt easier to recreate the PR on top of that work. This correctly handles the case of timeline limits going up and down. This does not handle changes in `required_state`, but that can be done as a separate PR. Based on #17575. --------- Co-authored-by: Eric Eastwood --- changelog.d/17579.misc | 1 + synapse/handlers/sliding_sync.py | 152 ++++++++++++++++-- synapse/rest/client/sync.py | 5 + synapse/types/handlers/__init__.py | 4 + .../sliding_sync/test_rooms_timeline.py | 136 ++++++++++++++++ 5 files changed, 285 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17579.misc diff --git a/changelog.d/17579.misc b/changelog.d/17579.misc new file mode 100644 index 0000000000..5eb3d5c7b4 --- /dev/null +++ b/changelog.d/17579.misc @@ -0,0 +1 @@ +Handle changes in `timeline_limit` in experimental sliding sync. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index c6834a1036..c7c81b1554 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -787,7 +787,20 @@ class SlidingSyncHandler: # subscription and have updates we need to send (i.e. either because # we haven't sent the room down, or we have but there are missing # updates). 
- for room_id in relevant_room_map: + for room_id, room_config in relevant_room_map.items(): + prev_room_sync_config = previous_connection_state.room_configs.get( + room_id + ) + if prev_room_sync_config is not None: + # Always include rooms whose timeline limit has increased. + # (see the "XXX: Odd behavior" described below) + if ( + prev_room_sync_config.timeline_limit + < room_config.timeline_limit + ): + rooms_should_send.add(room_id) + continue + status = previous_connection_state.rooms.have_sent_room(room_id) if ( # The room was never sent down before so the client needs to know @@ -819,12 +832,15 @@ class SlidingSyncHandler: if room_id in rooms_should_send } + new_connection_state = previous_connection_state.get_mutable() + @trace @tag_args async def handle_room(room_id: str) -> None: room_sync_result = await self.get_room_sync_data( sync_config=sync_config, previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, room_id=room_id, room_sync_config=relevant_rooms_to_send_map[room_id], room_membership_for_user_at_to_token=room_membership_for_user_map[ @@ -842,8 +858,6 @@ class SlidingSyncHandler: with start_active_span("sliding_sync.generate_room_entries"): await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10) - new_connection_state = previous_connection_state.get_mutable() - extensions = await self.get_extensions_response( sync_config=sync_config, actual_lists=lists, @@ -1955,6 +1969,7 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, previous_connection_state: "PerConnectionState", + new_connection_state: "MutablePerConnectionState", room_id: str, room_sync_config: RoomSyncConfig, room_membership_for_user_at_to_token: _RoomMembershipForUser, @@ -1998,9 +2013,27 @@ class SlidingSyncHandler: # - For an incremental sync where we haven't sent it down this # connection before # - # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 + # Relevant spec issue: + # https://github.com/matrix-org/matrix-spec/issues/1917 + # + # XXX: Odd behavior - We also check if the `timeline_limit` has increased, if so + # we ignore the from bound for the timeline to send down a larger chunk of + # history and set `unstable_expanded_timeline` to true. This is only being added + # to match the behavior of the Sliding Sync proxy as we expect the ElementX + # client to feel a certain way and be able to trickle in a full page of timeline + # messages to fill up the screen. This is a bit different to the behavior of the + # Sliding Sync proxy (which sets initial=true, but then doesn't send down the + # full state again), but existing apps, e.g. ElementX, just need `limited` set. + # We don't explicitly set `limited` but this will be the case for any room that + # has more history than we're trying to pull out. Using + # `unstable_expanded_timeline` allows us to avoid contaminating what `initial` + # or `limited` mean for clients that interpret them correctly. In future this + # behavior is almost certainly going to change. 
+ # + # TODO: Also handle changes to `required_state` from_bound = None initial = True + ignore_timeline_bound = False if from_token and not room_membership_for_user_at_to_token.newly_joined: room_status = previous_connection_state.rooms.have_sent_room(room_id) if room_status.status == HaveSentRoomFlag.LIVE: @@ -2018,7 +2051,26 @@ class SlidingSyncHandler: log_kv({"sliding_sync.room_status": room_status}) - log_kv({"sliding_sync.from_bound": from_bound, "sliding_sync.initial": initial}) + prev_room_sync_config = previous_connection_state.room_configs.get(room_id) + if prev_room_sync_config is not None: + # Check if the timeline limit has increased, if so ignore the + # timeline bound and record the change (see "XXX: Odd behavior" + # above). + if ( + prev_room_sync_config.timeline_limit + < room_sync_config.timeline_limit + ): + ignore_timeline_bound = True + + # TODO: Check for changes in `required_state`` + + log_kv( + { + "sliding_sync.from_bound": from_bound, + "sliding_sync.initial": initial, + "sliding_sync.ignore_timeline_bound": ignore_timeline_bound, + } + ) # Assemble the list of timeline events # @@ -2055,6 +2107,10 @@ class SlidingSyncHandler: room_membership_for_user_at_to_token.event_pos.to_room_stream_token() ) + timeline_from_bound = from_bound + if ignore_timeline_bound: + timeline_from_bound = None + # For initial `/sync` (and other historical scenarios mentioned above), we # want to view a historical section of the timeline; to fetch events by # `topological_ordering` (best representation of the room DAG as others were @@ -2080,7 +2136,7 @@ class SlidingSyncHandler: pagination_method: PaginateFunction = ( # Use `topographical_ordering` for historical events paginate_room_events_by_topological_ordering - if from_bound is None + if timeline_from_bound is None # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) @@ -2090,7 +2146,7 @@ class SlidingSyncHandler: # (from newer to older events) starting at to_bound. # This ensures we fill the `limit` with the newest events first, from_key=to_bound, - to_key=from_bound, + to_key=timeline_from_bound, direction=Direction.BACKWARDS, # We add one so we can determine if there are enough events to saturate # the limit or not (see `limited`) @@ -2448,6 +2504,55 @@ class SlidingSyncHandler: if new_bump_event_pos.stream > 0: bump_stamp = new_bump_event_pos.stream + unstable_expanded_timeline = False + prev_room_sync_config = previous_connection_state.room_configs.get(room_id) + # Record the `room_sync_config` if we're `ignore_timeline_bound` (which means + # that the `timeline_limit` has increased) + if ignore_timeline_bound: + # FIXME: We signal the fact that we're sending down more events to + # the client by setting `unstable_expanded_timeline` to true (see + # "XXX: Odd behavior" above). + unstable_expanded_timeline = True + + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_config.required_state_map, + ) + elif prev_room_sync_config is not None: + # If the result is `limited` then we need to record that the + # `timeline_limit` has been reduced, as when/if the client later requests + # more timeline then we have more data to send. + # + # Otherwise (when not `limited`) we don't need to record that the + # `timeline_limit` has been reduced, as the *effective* `timeline_limit` + # (i.e. the amount of timeline we have previously sent to the client) is at + # least the previous `timeline_limit`. 
+ # + # This is to handle the case where the `timeline_limit` e.g. goes from 10 to + # 5 to 10 again (without any timeline gaps), where there's no point sending + # down the initial historical chunk events when the `timeline_limit` is + # increased as the client already has the 10 previous events. However, if + # client has a gap in the timeline (i.e. `limited` is True), then we *do* + # need to record the reduced timeline. + # + # TODO: Handle timeline gaps (`get_timeline_gaps()`) - This is separate from + # the gaps we might see on the client because a response was `limited` we're + # talking about above. + if ( + limited + and prev_room_sync_config.timeline_limit + > room_sync_config.timeline_limit + ): + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_config.required_state_map, + ) + + # TODO: Record changes in required_state. + + else: + new_connection_state.room_configs[room_id] = room_sync_config + set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) return SlidingSyncResult.RoomResult( @@ -2462,6 +2567,7 @@ class SlidingSyncHandler: stripped_state=stripped_state, prev_batch=prev_batch_token, limited=limited, + unstable_expanded_timeline=unstable_expanded_timeline, num_live=num_live, bump_stamp=bump_stamp, joined_count=room_membership_summary.get( @@ -3264,16 +3370,30 @@ class PerConnectionState: Attributes: rooms: The status of each room for the events stream. receipts: The status of each room for the receipts stream. + room_configs: Map from room_id to the `RoomSyncConfig` of all + rooms that we have previously sent down. """ rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) + room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict) + def get_mutable(self) -> "MutablePerConnectionState": """Get a mutable copy of this state.""" + room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) + return MutablePerConnectionState( rooms=self.rooms.get_mutable(), receipts=self.receipts.get_mutable(), + room_configs=ChainMap({}, room_configs), + ) + + def copy(self) -> "PerConnectionState": + return PerConnectionState( + rooms=self.rooms.copy(), + receipts=self.receipts.copy(), + room_configs=dict(self.room_configs), ) @@ -3284,8 +3404,18 @@ class MutablePerConnectionState(PerConnectionState): rooms: MutableRoomStatusMap[RoomStreamToken] receipts: MutableRoomStatusMap[MultiWriterStreamToken] + room_configs: typing.ChainMap[str, RoomSyncConfig] + def has_updates(self) -> bool: - return bool(self.rooms.get_updates()) or bool(self.receipts.get_updates()) + return ( + bool(self.rooms.get_updates()) + or bool(self.receipts.get_updates()) + or bool(self.get_room_config_updates()) + ) + + def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]: + """Get updates to the room sync config""" + return self.room_configs.maps[0] @attr.s(auto_attribs=True) @@ -3369,7 +3499,6 @@ class SlidingSyncConnectionStore: ) -> int: """Record updated per-connection state, returning the connection position associated with the new state. - If there are no changes to the state this may return the same token as the existing per-connection state. """ @@ -3390,10 +3519,7 @@ class SlidingSyncConnectionStore: # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s # don't grow forever. 
-            sync_statuses[new_store_token] = PerConnectionState(
-                rooms=new_connection_state.rooms.copy(),
-                receipts=new_connection_state.receipts.copy(),
-            )
+            sync_statuses[new_store_token] = new_connection_state.copy()
 
         return new_store_token
 
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 8c5db2a513..21b90b0674 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -1044,6 +1044,11 @@ class SlidingSyncRestServlet(RestServlet):
             if room_result.initial:
                 serialized_rooms[room_id]["initial"] = room_result.initial
 
+            if room_result.unstable_expanded_timeline:
+                serialized_rooms[room_id][
+                    "unstable_expanded_timeline"
+                ] = room_result.unstable_expanded_timeline
+
             # This will be omitted for invite/knock rooms with `stripped_state`
             if (
                 room_result.required_state is not None
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
index 363f060bef..580342d98a 100644
--- a/synapse/types/handlers/__init__.py
+++ b/synapse/types/handlers/__init__.py
@@ -171,6 +171,9 @@ class SlidingSyncResult:
                 their local state. When there is an update, servers MUST omit this flag
                 entirely and NOT send "initial":false as this is wasteful on bandwidth. The
                 absence of this flag means 'false'.
+            unstable_expanded_timeline: Flag which is set if we're returning more historic
+                events due to the timeline limit having increased. See "XXX: Odd behavior"
+                comment in `synapse.handlers.sliding_sync`.
             required_state: The current state of the room
             timeline: Latest events in the room. The last event is the most recent.
             bundled_aggregations: A mapping of event ID to the bundled aggregations for
@@ -219,6 +222,7 @@ class SlidingSyncResult:
         heroes: Optional[List[StrippedHero]]
         is_dm: bool
         initial: bool
+        unstable_expanded_timeline: bool
         # Should be empty for invite/knock rooms with `stripped_state`
         required_state: List[EventBase]
         # Should be empty for invite/knock rooms with `stripped_state`
diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py
index 2e9586ca73..eeac0d6aa9 100644
--- a/tests/rest/client/sliding_sync/test_rooms_timeline.py
+++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py
@@ -17,6 +17,7 @@ from typing import List, Optional
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
+from synapse.api.constants import EventTypes
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
 from synapse.types import StreamToken, StrSequence
@@ -573,3 +574,138 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
 
         # Nothing to see for this banned user in the room in the token range
         self.assertIsNone(response_body["rooms"].get(room_id1))
+
+    def test_increasing_timeline_range_sends_more_messages(self) -> None:
+        """
+        Test that increasing the timeline limit via room subscriptions sends the
+        room down with more messages in a limited sync.
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [[EventTypes.Create, ""]], + "timeline_limit": 1, + } + } + } + + message_events = [] + for _ in range(10): + resp = self.helper.send(room_id1, "msg", tok=user1_tok) + message_events.append(resp["event_id"]) + + # Make the first Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + room_response = response_body["rooms"][room_id1] + + self.assertEqual(room_response["initial"], True) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], True) + + # We only expect the last message at first + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=message_events[-1:], + message=str(room_response["timeline"]), + ) + + # We also expect to get the create event state. + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + self._assertRequiredStateIncludes( + room_response["required_state"], + {state_map[(EventTypes.Create, "")]}, + exact=True, + ) + + # Now do another request with a room subscription with an increased timeline limit + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 10, + } + } + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertEqual(room_response["unstable_expanded_timeline"], True) + self.assertEqual(room_response["limited"], True) + + # Now we expect all the messages + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=message_events, + message=str(room_response["timeline"]), + ) + + # We don't expect to get the room create down, as nothing has changed. 
+ self.assertNotIn("required_state", room_response) + + # Decreasing the timeline limit shouldn't resend any events + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 5, + } + } + + event_response = self.helper.send(room_id1, "msg", tok=user1_tok) + latest_event_id = event_response["event_id"] + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], False) + + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=[latest_event_id], + message=str(room_response["timeline"]), + ) + + # Increasing the limit to what it was before also should not resend any + # events + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 10, + } + } + + event_response = self.helper.send(room_id1, "msg", tok=user1_tok) + latest_event_id = event_response["event_id"] + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], False) + + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=[latest_event_id], + message=str(room_response["timeline"]), + ) From 10428046e4afd10bb53dc86528bda550527e8572 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2024 12:36:49 +0100 Subject: [PATCH 139/332] Add metrics for sliding sync processing time (#17593) This should let us see how quickly we actually process things in practice. --- changelog.d/17593.misc | 1 + synapse/handlers/sliding_sync.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelog.d/17593.misc diff --git a/changelog.d/17593.misc b/changelog.d/17593.misc new file mode 100644 index 0000000000..60afc284be --- /dev/null +++ b/changelog.d/17593.misc @@ -0,0 +1 @@ +Add histogram metrics for sliding sync processing time. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index c7c81b1554..14d0ecbe16 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -45,6 +45,7 @@ from typing import ( import attr from immutabledict import immutabledict +from prometheus_client import Histogram from typing_extensions import assert_never from synapse.api.constants import ( @@ -104,6 +105,13 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +sync_processing_time = Histogram( + "synapse_sliding_sync_processing_time", + "Time taken to generate a sliding sync response, ignoring wait times.", + ["initial"], +) + + class Sentinel(enum.Enum): # defining a sentinel in this way allows mypy to correctly handle the # type of a dictionary lookup and subsequent type narrowing. @@ -571,6 +579,8 @@ class SlidingSyncHandler: from_token: The point in the stream to sync from. Token of the end of the previous batch. May be `None` if this is the initial sync request. 
""" + start_time_s = self.clock.time() + user_id = sync_config.user.to_string() app_service = self.store.get_app_service_by_user_id(user_id) if app_service: @@ -934,6 +944,11 @@ class SlidingSyncHandler: set_tag(SynapseTags.RESULT_PREFIX + "result", bool(sliding_sync_result)) set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id) + end_time_s = self.clock.time() + sync_processing_time.labels(from_token is not None).observe( + end_time_s - start_time_s + ) + return sliding_sync_result @trace From f1e8d2d15abcac780ab97c245daecfd6c5c6aa3f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2024 12:57:34 +0100 Subject: [PATCH 140/332] Sliding Sync: Speed up getting receipts for initial rooms (#17592) Let's only pull out the events we care about. Note that the index isn't necessary here, as postgres is happy to scan the set of rooms for the events. --- changelog.d/17592.misc | 1 + synapse/handlers/sliding_sync.py | 37 ++------ synapse/storage/databases/main/receipts.py | 84 +++++++++++++++++++ .../delta/86/02_receipts_event_id_index.sql | 15 ++++ 4 files changed, 108 insertions(+), 29 deletions(-) create mode 100644 changelog.d/17592.misc create mode 100644 synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql diff --git a/changelog.d/17592.misc b/changelog.d/17592.misc new file mode 100644 index 0000000000..1b4a53ee17 --- /dev/null +++ b/changelog.d/17592.misc @@ -0,0 +1 @@ +Correctly track read receipts that should be sent down in experimental sliding sync. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index 14d0ecbe16..af8d7ab96c 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -3088,38 +3088,17 @@ class SlidingSyncHandler: # from that room but we only want to include receipts for events # in the timeline to avoid bloating and blowing up the sync response # as the number of users in the room increases. 
-        initial_rooms = {
-            room_id
+        initial_rooms_and_event_ids = [
+            (room_id, event.event_id)
             for room_id in initial_rooms
             if room_id in actual_room_response_map
-        }
-        if initial_rooms:
-            initial_receipts = await self.store.get_linearized_receipts_for_rooms(
-                room_ids=initial_rooms,
-                to_key=to_token.receipt_key,
+            for event in actual_room_response_map[room_id].timeline_events
+        ]
+        if initial_rooms_and_event_ids:
+            initial_receipts = await self.store.get_linearized_receipts_for_events(
+                room_and_event_ids=initial_rooms_and_event_ids,
             )
-
-            for receipt in initial_receipts:
-                relevant_event_ids = {
-                    event.event_id
-                    for event in actual_room_response_map[
-                        receipt["room_id"]
-                    ].timeline_events
-                }
-
-                content = {
-                    event_id: content_value
-                    for event_id, content_value in receipt["content"].items()
-                    if event_id in relevant_event_ids
-                }
-                if content:
-                    fetched_receipts.append(
-                        {
-                            "type": receipt["type"],
-                            "room_id": receipt["room_id"],
-                            "content": content,
-                        }
-                    )
+            fetched_receipts.extend(initial_receipts)

         fetched_receipts = ReceiptEventSource.filter_out_private_receipts(
             fetched_receipts, sync_config.user.to_string()
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index e266cc2a20..0a20f5db4c 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -43,6 +43,7 @@ from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_tuple_in_list_sql_clause,
 )
 from synapse.storage.engines._base import IsolationLevel
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
@@ -481,6 +482,83 @@ class ReceiptsWorkerStore(SQLBaseStore):
         }
         return results

+    async def get_linearized_receipts_for_events(
+        self,
+        room_and_event_ids: Collection[Tuple[str, str]],
+    ) -> Sequence[JsonMapping]:
+        """Get all receipts for the given set of events.
+
+        Arguments:
+            room_and_event_ids: A collection of 2-tuples of room ID and
+                event IDs to fetch receipts for
+
+        Returns:
+            A list of receipts, one per room.
+        """
+
+        def get_linearized_receipts_for_events_txn(
+            txn: LoggingTransaction,
+            room_id_event_id_tuples: Collection[Tuple[str, str]],
+        ) -> List[Tuple[str, str, str, str, Optional[str], str]]:
+            clause, args = make_tuple_in_list_sql_clause(
+                self.database_engine, ("room_id", "event_id"), room_id_event_id_tuples
+            )
+
+            sql = f"""
+                SELECT room_id, receipt_type, user_id, event_id, thread_id, data
+                FROM receipts_linearized
+                WHERE {clause}
+            """
+
+            txn.execute(sql, args)
+
+            return txn.fetchall()
+
+        # room_id -> event_id -> receipt_type -> user_id -> receipt data
+        room_to_content: Dict[str, Dict[str, Dict[str, Dict[str, JsonMapping]]]] = {}
+        for batch in batch_iter(room_and_event_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "get_linearized_receipts_for_events",
+                get_linearized_receipts_for_events_txn,
+                batch,
+            )
+
+            for (
+                room_id,
+                receipt_type,
+                user_id,
+                event_id,
+                thread_id,
+                data,
+            ) in batch_results:
+                content = room_to_content.setdefault(room_id, {})
+                user_receipts = content.setdefault(event_id, {}).setdefault(
+                    receipt_type, {}
+                )
+
+                receipt_data = db_to_json(data)
+                if thread_id is not None:
+                    receipt_data["thread_id"] = thread_id
+
+                # MSC4102: always replace threaded receipts with unthreaded ones
+                # if there is a clash. Specifically:
+                # - if there is no existing receipt, great, set the data.
+ # - if there is an existing receipt, is it threaded (thread_id + # present)? YES: replace if this receipt has no thread id. + # NO: do not replace. This means we will drop some receipts, but + # MSC4102 is designed to drop semantically meaningless receipts, + # so this is okay. Previously, we would drop meaningful data! + if user_id in user_receipts: + if "thread_id" in user_receipts[user_id] and not thread_id: + user_receipts[user_id] = receipt_data + else: + user_receipts[user_id] = receipt_data + + return [ + {"type": EduTypes.RECEIPT, "room_id": room_id, "content": content} + for room_id, content in room_to_content.items() + ] + @cached( num_args=2, ) @@ -996,6 +1074,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, self._background_receipts_graph_unique_index, ) + self.db_pool.updates.register_background_index_update( + update_name="receipts_room_id_event_id_index", + index_name="receipts_linearized_event_id", + table="receipts_linearized", + columns=("room_id", "event_id"), + ) async def _populate_receipt_event_stream_ordering( self, progress: JsonDict, batch_size: int diff --git a/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql new file mode 100644 index 0000000000..e6db91e5b5 --- /dev/null +++ b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql @@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . 
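+-- The INSERT below schedules the background update that was registered in
+-- `ReceiptsBackgroundUpdateStore` earlier in this patch: rather than building
+-- the new receipts_linearized (room_id, event_id) index synchronously at
+-- startup, Synapse creates it in the background once this row is picked up.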
+ +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8602, 'receipts_room_id_event_id_index', '{}'); From 7c9684b5dc5238c8ddaa604b9d306ff92c172161 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 20 Aug 2024 14:57:22 +0200 Subject: [PATCH 141/332] 1.114.0rc1 --- CHANGES.md | 51 ++++++++++++++++++++++++++++++++++++++++ changelog.d/17483.bugfix | 1 - changelog.d/17510.bugfix | 1 - changelog.d/17514.misc | 1 - changelog.d/17515.doc | 3 --- changelog.d/17531.misc | 1 - changelog.d/17535.bugfix | 1 - changelog.d/17536.misc | 1 - changelog.d/17537.misc | 1 - changelog.d/17538.bugfix | 1 - changelog.d/17542.misc | 1 - changelog.d/17545.bugfix | 1 - changelog.d/17548.misc | 1 - changelog.d/17557.misc | 1 - changelog.d/17558.misc | 1 - changelog.d/17559.doc | 1 - changelog.d/17561.misc | 1 - changelog.d/17562.misc | 1 - changelog.d/17563.misc | 1 - changelog.d/17564.misc | 1 - changelog.d/17566.misc | 1 - changelog.d/17567.misc | 1 - changelog.d/17568.bugfix | 1 - changelog.d/17569.misc | 1 - changelog.d/17570.bugfix | 1 - changelog.d/17571.misc | 1 - changelog.d/17574.misc | 1 - changelog.d/17575.misc | 1 - changelog.d/17579.misc | 1 - changelog.d/17589.misc | 1 - changelog.d/17592.misc | 1 - changelog.d/17593.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 34 files changed, 58 insertions(+), 34 deletions(-) delete mode 100644 changelog.d/17483.bugfix delete mode 100644 changelog.d/17510.bugfix delete mode 100644 changelog.d/17514.misc delete mode 100644 changelog.d/17515.doc delete mode 100644 changelog.d/17531.misc delete mode 100644 changelog.d/17535.bugfix delete mode 100644 changelog.d/17536.misc delete mode 100644 changelog.d/17537.misc delete mode 100644 changelog.d/17538.bugfix delete mode 100644 changelog.d/17542.misc delete mode 100644 changelog.d/17545.bugfix delete mode 100644 changelog.d/17548.misc delete mode 100644 changelog.d/17557.misc delete mode 100644 changelog.d/17558.misc delete mode 100644 changelog.d/17559.doc delete mode 100644 changelog.d/17561.misc delete mode 100644 changelog.d/17562.misc delete mode 100644 changelog.d/17563.misc delete mode 100644 changelog.d/17564.misc delete mode 100644 changelog.d/17566.misc delete mode 100644 changelog.d/17567.misc delete mode 100644 changelog.d/17568.bugfix delete mode 100644 changelog.d/17569.misc delete mode 100644 changelog.d/17570.bugfix delete mode 100644 changelog.d/17571.misc delete mode 100644 changelog.d/17574.misc delete mode 100644 changelog.d/17575.misc delete mode 100644 changelog.d/17579.misc delete mode 100644 changelog.d/17589.misc delete mode 100644 changelog.d/17592.misc delete mode 100644 changelog.d/17593.misc diff --git a/CHANGES.md b/CHANGES.md index 7b71e32e23..8295f0d805 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,54 @@ +# Synapse 1.114.0rc1 (2024-08-20) + +### Bugfixes + +- Start handlers for new media endpoints when media resource configured. ([\#17483](https://github.com/element-hq/synapse/issues/17483)) +- Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17510](https://github.com/element-hq/synapse/issues/17510)) +- Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. 
([\#17535](https://github.com/element-hq/synapse/issues/17535)) +- Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17538](https://github.com/element-hq/synapse/issues/17538)) +- Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. ([\#17545](https://github.com/element-hq/synapse/issues/17545)) +- Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. ([\#17568](https://github.com/element-hq/synapse/issues/17568)) +- Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint. ([\#17570](https://github.com/element-hq/synapse/issues/17570)) + +### Improved Documentation + +- Clarify default behaviour of the + [`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) + option. ([\#17515](https://github.com/element-hq/synapse/issues/17515)) +- Improve docstrings for profile methods. ([\#17559](https://github.com/element-hq/synapse/issues/17559)) + +### Internal Changes + +- Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17514](https://github.com/element-hq/synapse/issues/17514)) +- Fixup comment in sliding sync implementation. ([\#17531](https://github.com/element-hq/synapse/issues/17531)) +- Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. ([\#17536](https://github.com/element-hq/synapse/issues/17536)) +- Fix performance of device lists in `/key/changes` and sliding sync. ([\#17537](https://github.com/element-hq/synapse/issues/17537), [\#17548](https://github.com/element-hq/synapse/issues/17548)) +- Bump setuptools from 67.6.0 to 72.1.0. ([\#17542](https://github.com/element-hq/synapse/issues/17542)) +- Add a utility function for generating random event IDs. ([\#17557](https://github.com/element-hq/synapse/issues/17557)) +- Speed up responding to media requests. ([\#17558](https://github.com/element-hq/synapse/issues/17558), [\#17561](https://github.com/element-hq/synapse/issues/17561), [\#17564](https://github.com/element-hq/synapse/issues/17564), [\#17566](https://github.com/element-hq/synapse/issues/17566), [\#17567](https://github.com/element-hq/synapse/issues/17567), [\#17569](https://github.com/element-hq/synapse/issues/17569)) +- Test github token before running release script steps. ([\#17562](https://github.com/element-hq/synapse/issues/17562)) +- Reduce log spam of multipart files. ([\#17563](https://github.com/element-hq/synapse/issues/17563)) +- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. ([\#17571](https://github.com/element-hq/synapse/issues/17571)) +- Refactor per-connection state in experimental sliding sync handler. ([\#17574](https://github.com/element-hq/synapse/issues/17574)) +- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592)) +- Handle changes in `timeline_limit` in experimental sliding sync. 
([\#17579](https://github.com/element-hq/synapse/issues/17579)) +- Add histogram metrics for sliding sync processing time. ([\#17593](https://github.com/element-hq/synapse/issues/17593)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.1 to 1.7.1. ([\#17526](https://github.com/element-hq/synapse/issues/17526)) +* Bump lxml from 5.2.2 to 5.3.0. ([\#17550](https://github.com/element-hq/synapse/issues/17550)) +* Bump phonenumbers from 8.13.42 to 8.13.43. ([\#17551](https://github.com/element-hq/synapse/issues/17551)) +* Bump regex from 1.10.5 to 1.10.6. ([\#17527](https://github.com/element-hq/synapse/issues/17527)) +* Bump sentry-sdk from 2.10.0 to 2.12.0. ([\#17553](https://github.com/element-hq/synapse/issues/17553)) +* Bump serde from 1.0.204 to 1.0.206. ([\#17556](https://github.com/element-hq/synapse/issues/17556)) +* Bump serde_json from 1.0.122 to 1.0.124. ([\#17555](https://github.com/element-hq/synapse/issues/17555)) +* Bump sigstore/cosign-installer from 3.5.0 to 3.6.0. ([\#17549](https://github.com/element-hq/synapse/issues/17549)) +* Bump types-pyyaml from 6.0.12.20240311 to 6.0.12.20240808. ([\#17552](https://github.com/element-hq/synapse/issues/17552)) +* Bump types-requests from 2.31.0.20240406 to 2.32.0.20240712. ([\#17524](https://github.com/element-hq/synapse/issues/17524)) + # Synapse 1.113.0 (2024-08-13) No significant changes since 1.113.0rc1. diff --git a/changelog.d/17483.bugfix b/changelog.d/17483.bugfix deleted file mode 100644 index c97a802dbf..0000000000 --- a/changelog.d/17483.bugfix +++ /dev/null @@ -1 +0,0 @@ -Start handlers for new media endpoints when media resource configured. diff --git a/changelog.d/17510.bugfix b/changelog.d/17510.bugfix deleted file mode 100644 index 3170c284bd..0000000000 --- a/changelog.d/17510.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17514.misc b/changelog.d/17514.misc deleted file mode 100644 index fc3cc37915..0000000000 --- a/changelog.d/17514.misc +++ /dev/null @@ -1 +0,0 @@ -Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17515.doc b/changelog.d/17515.doc deleted file mode 100644 index c2dbe24e9d..0000000000 --- a/changelog.d/17515.doc +++ /dev/null @@ -1,3 +0,0 @@ -Clarify default behaviour of the -[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) -option. \ No newline at end of file diff --git a/changelog.d/17531.misc b/changelog.d/17531.misc deleted file mode 100644 index 25b7b36a72..0000000000 --- a/changelog.d/17531.misc +++ /dev/null @@ -1 +0,0 @@ -Fixup comment in sliding sync implementation. diff --git a/changelog.d/17535.bugfix b/changelog.d/17535.bugfix deleted file mode 100644 index c5b5da0485..0000000000 --- a/changelog.d/17535.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. diff --git a/changelog.d/17536.misc b/changelog.d/17536.misc deleted file mode 100644 index 116ef0c36d..0000000000 --- a/changelog.d/17536.misc +++ /dev/null @@ -1 +0,0 @@ -Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. 
\ No newline at end of file diff --git a/changelog.d/17537.misc b/changelog.d/17537.misc deleted file mode 100644 index 861b241dcd..0000000000 --- a/changelog.d/17537.misc +++ /dev/null @@ -1 +0,0 @@ -Fix performance of device lists in `/key/changes` and sliding sync. diff --git a/changelog.d/17538.bugfix b/changelog.d/17538.bugfix deleted file mode 100644 index 9e4e31dbdb..0000000000 --- a/changelog.d/17538.bugfix +++ /dev/null @@ -1 +0,0 @@ -Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17542.misc b/changelog.d/17542.misc deleted file mode 100644 index b5773115ca..0000000000 --- a/changelog.d/17542.misc +++ /dev/null @@ -1 +0,0 @@ -Bump setuptools from 67.6.0 to 72.1.0. \ No newline at end of file diff --git a/changelog.d/17545.bugfix b/changelog.d/17545.bugfix deleted file mode 100644 index 31e22d873e..0000000000 --- a/changelog.d/17545.bugfix +++ /dev/null @@ -1 +0,0 @@ -Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. \ No newline at end of file diff --git a/changelog.d/17548.misc b/changelog.d/17548.misc deleted file mode 100644 index 861b241dcd..0000000000 --- a/changelog.d/17548.misc +++ /dev/null @@ -1 +0,0 @@ -Fix performance of device lists in `/key/changes` and sliding sync. diff --git a/changelog.d/17557.misc b/changelog.d/17557.misc deleted file mode 100644 index 535f4b6e5f..0000000000 --- a/changelog.d/17557.misc +++ /dev/null @@ -1 +0,0 @@ -Add a utility function for generating random event IDs. \ No newline at end of file diff --git a/changelog.d/17558.misc b/changelog.d/17558.misc deleted file mode 100644 index cfa8089a81..0000000000 --- a/changelog.d/17558.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. diff --git a/changelog.d/17559.doc b/changelog.d/17559.doc deleted file mode 100644 index e54a122b74..0000000000 --- a/changelog.d/17559.doc +++ /dev/null @@ -1 +0,0 @@ -Improve docstrings for profile methods. diff --git a/changelog.d/17561.misc b/changelog.d/17561.misc deleted file mode 100644 index cfa8089a81..0000000000 --- a/changelog.d/17561.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. diff --git a/changelog.d/17562.misc b/changelog.d/17562.misc deleted file mode 100644 index a267df8b83..0000000000 --- a/changelog.d/17562.misc +++ /dev/null @@ -1 +0,0 @@ -Test github token before running release script steps. diff --git a/changelog.d/17563.misc b/changelog.d/17563.misc deleted file mode 100644 index 672764ab82..0000000000 --- a/changelog.d/17563.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce log spam of multipart files. diff --git a/changelog.d/17564.misc b/changelog.d/17564.misc deleted file mode 100644 index cfa8089a81..0000000000 --- a/changelog.d/17564.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. diff --git a/changelog.d/17566.misc b/changelog.d/17566.misc deleted file mode 100644 index 7210753fa3..0000000000 --- a/changelog.d/17566.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. \ No newline at end of file diff --git a/changelog.d/17567.misc b/changelog.d/17567.misc deleted file mode 100644 index cfa8089a81..0000000000 --- a/changelog.d/17567.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. 
diff --git a/changelog.d/17568.bugfix b/changelog.d/17568.bugfix deleted file mode 100644 index 71a1f12915..0000000000 --- a/changelog.d/17568.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. diff --git a/changelog.d/17569.misc b/changelog.d/17569.misc deleted file mode 100644 index cfa8089a81..0000000000 --- a/changelog.d/17569.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up responding to media requests. diff --git a/changelog.d/17570.bugfix b/changelog.d/17570.bugfix deleted file mode 100644 index e2964168b1..0000000000 --- a/changelog.d/17570.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint. diff --git a/changelog.d/17571.misc b/changelog.d/17571.misc deleted file mode 100644 index 67182a4fcd..0000000000 --- a/changelog.d/17571.misc +++ /dev/null @@ -1 +0,0 @@ -Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. diff --git a/changelog.d/17574.misc b/changelog.d/17574.misc deleted file mode 100644 index 71020abec4..0000000000 --- a/changelog.d/17574.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor per-connection state in experimental sliding sync handler. diff --git a/changelog.d/17575.misc b/changelog.d/17575.misc deleted file mode 100644 index 1b4a53ee17..0000000000 --- a/changelog.d/17575.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly track read receipts that should be sent down in experimental sliding sync. diff --git a/changelog.d/17579.misc b/changelog.d/17579.misc deleted file mode 100644 index 5eb3d5c7b4..0000000000 --- a/changelog.d/17579.misc +++ /dev/null @@ -1 +0,0 @@ -Handle changes in `timeline_limit` in experimental sliding sync. diff --git a/changelog.d/17589.misc b/changelog.d/17589.misc deleted file mode 100644 index 1b4a53ee17..0000000000 --- a/changelog.d/17589.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly track read receipts that should be sent down in experimental sliding sync. diff --git a/changelog.d/17592.misc b/changelog.d/17592.misc deleted file mode 100644 index 1b4a53ee17..0000000000 --- a/changelog.d/17592.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly track read receipts that should be sent down in experimental sliding sync. diff --git a/changelog.d/17593.misc b/changelog.d/17593.misc deleted file mode 100644 index 60afc284be..0000000000 --- a/changelog.d/17593.misc +++ /dev/null @@ -1 +0,0 @@ -Add histogram metrics for sliding sync processing time. diff --git a/debian/changelog b/debian/changelog index 2692ab621d..f32dcc0450 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.114.0~rc1) stable; urgency=medium + + * New synapse release 1.114.0rc1. + + -- Synapse Packaging team Tue, 20 Aug 2024 12:55:28 +0000 + matrix-synapse-py3 (1.113.0) stable; urgency=medium * New Synapse release 1.113.0. 
diff --git a/pyproject.toml b/pyproject.toml
index 82369f9052..b31eca75ec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.113.0"
+version = "1.114.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "AGPL-3.0-or-later"

From a8e313836d1532fbb2b51bbe3ab64d1a068e6b33 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 20 Aug 2024 15:18:13 +0200
Subject: [PATCH 142/332] changelog: move some sliding sync changes into the
 features section

---
 CHANGES.md | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 8295f0d805..0a57cfb906 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,11 @@
 # Synapse 1.114.0rc1 (2024-08-20)

+### Features
+
+- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. ([\#17571](https://github.com/element-hq/synapse/issues/17571))
+- Handle changes in `timeline_limit` in experimental sliding sync. ([\#17579](https://github.com/element-hq/synapse/issues/17579))
+- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592))
+
 ### Bugfixes

 - Start handlers for new media endpoints when media resource configured. ([\#17483](https://github.com/element-hq/synapse/issues/17483))
@@ -28,10 +34,7 @@
 - Speed up responding to media requests. ([\#17558](https://github.com/element-hq/synapse/issues/17558), [\#17561](https://github.com/element-hq/synapse/issues/17561), [\#17564](https://github.com/element-hq/synapse/issues/17564), [\#17566](https://github.com/element-hq/synapse/issues/17566), [\#17567](https://github.com/element-hq/synapse/issues/17567), [\#17569](https://github.com/element-hq/synapse/issues/17569))
 - Test github token before running release script steps. ([\#17562](https://github.com/element-hq/synapse/issues/17562))
 - Reduce log spam of multipart files. ([\#17563](https://github.com/element-hq/synapse/issues/17563))
-- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. ([\#17571](https://github.com/element-hq/synapse/issues/17571))
 - Refactor per-connection state in experimental sliding sync handler. ([\#17574](https://github.com/element-hq/synapse/issues/17574))
-- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592))
-- Handle changes in `timeline_limit` in experimental sliding sync. ([\#17579](https://github.com/element-hq/synapse/issues/17579))
 - Add histogram metrics for sliding sync processing time. ([\#17593](https://github.com/element-hq/synapse/issues/17593))

From 92b38c1afd1acd1b88b3ae313f05bcf1bec0e849 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 20 Aug 2024 19:30:23 +0100
Subject: [PATCH 143/332] Sliding sync: Split up handler into its own module
 (#17595)

That file was getting long.
The changes are non functional, and simply split things up into: - the main class - the connection store - the extensions - the types --- changelog.d/17595.misc | 1 + .../__init__.py} | 1280 +---------------- synapse/handlers/sliding_sync/extensions.py | 660 +++++++++ synapse/handlers/sliding_sync/store.py | 200 +++ synapse/handlers/sliding_sync/types.py | 506 +++++++ 5 files changed, 1380 insertions(+), 1267 deletions(-) create mode 100644 changelog.d/17595.misc rename synapse/handlers/{sliding_sync.py => sliding_sync/__init__.py} (66%) create mode 100644 synapse/handlers/sliding_sync/extensions.py create mode 100644 synapse/handlers/sliding_sync/store.py create mode 100644 synapse/handlers/sliding_sync/types.py diff --git a/changelog.d/17595.misc b/changelog.d/17595.misc new file mode 100644 index 0000000000..c8e040d87c --- /dev/null +++ b/changelog.d/17595.misc @@ -0,0 +1 @@ +Refactor sliding sync class into multiple files. diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync/__init__.py similarity index 66% rename from synapse/handlers/sliding_sync.py rename to synapse/handlers/sliding_sync/__init__.py index af8d7ab96c..1fcf2d149b 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1,7 +1,7 @@ # # This file is licensed under the Affero General Public License (AGPL) version 3. # -# Copyright (C) 2024 New Vector, Ltd +# Copyright (C) 2023 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -11,36 +11,21 @@ # See the GNU Affero General Public License for more details: # . # -# Originally licensed under the Apache License, Version 2.0: -# . -# -# [This file includes modifications made by New Vector Limited] -# -# + import enum import logging -import typing -from collections import ChainMap -from enum import Enum from itertools import chain from typing import ( TYPE_CHECKING, Any, - Callable, Dict, - Final, - Generic, List, Literal, Mapping, - MutableMapping, Optional, - Sequence, Set, Tuple, - TypeVar, Union, - cast, ) import attr @@ -55,11 +40,18 @@ from synapse.api.constants import ( EventTypes, Membership, ) -from synapse.api.errors import SlidingSyncUnknownPosition from synapse.events import EventBase, StrippedStateEvent from synapse.events.utils import parse_stripped_state_event, strip_event -from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.relations import BundledAggregations +from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler +from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore +from synapse.handlers.sliding_sync.types import ( + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomSyncConfig, + StateValues, +) from synapse.logging.opentracing import ( SynapseTags, log_kv, @@ -79,10 +71,7 @@ from synapse.storage.databases.main.stream import ( ) from synapse.storage.roommember import MemberSummary from synapse.types import ( - DeviceListUpdates, JsonDict, - JsonMapping, - MultiWriterStreamToken, MutableStateMap, PersistedEventPosition, Requester, @@ -205,267 +194,6 @@ def filter_membership_for_sync( ) -# We can't freeze this class because we want to update it in place with the -# de-duplicated data. -@attr.s(slots=True, auto_attribs=True) -class RoomSyncConfig: - """ - Holds the config for what data we should fetch for a room in the sync response. 
- - Attributes: - timeline_limit: The maximum number of events to return in the timeline. - - required_state_map: Map from state event type to state_keys requested for the - room. The values are close to `StateKey` but actually use a syntax where you - can provide `*` wildcard and `$LAZY` for lazy-loading room members. - """ - - timeline_limit: int - required_state_map: Dict[str, Set[str]] - - @classmethod - def from_room_config( - cls, - room_params: SlidingSyncConfig.CommonRoomParameters, - ) -> "RoomSyncConfig": - """ - Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config. - - Args: - room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription` - """ - required_state_map: Dict[str, Set[str]] = {} - for ( - state_type, - state_key, - ) in room_params.required_state: - # If we already have a wildcard for this specific `state_key`, we don't need - # to add it since the wildcard already covers it. - if state_key in required_state_map.get(StateValues.WILDCARD, set()): - continue - - # If we already have a wildcard `state_key` for this `state_type`, we don't need - # to add anything else - if StateValues.WILDCARD in required_state_map.get(state_type, set()): - continue - - # If we're getting wildcards for the `state_type` and `state_key`, that's - # all that matters so get rid of any other entries - if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD: - required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}} - # We can break, since we don't need to add anything else - break - - # If we're getting a wildcard for the `state_type`, get rid of any other - # entries with the same `state_key`, since the wildcard will cover it already. - elif state_type == StateValues.WILDCARD: - # Get rid of any entries that match the `state_key` - # - # Make a copy so we don't run into an error: `dictionary changed size - # during iteration`, when we remove items - for ( - existing_state_type, - existing_state_key_set, - ) in list(required_state_map.items()): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for existing_state_key in existing_state_key_set.copy(): - if existing_state_key == state_key: - existing_state_key_set.remove(state_key) - - # If we've the left the `set()` empty, remove it from the map - if existing_state_key_set == set(): - required_state_map.pop(existing_state_type, None) - - # If we're getting a wildcard `state_key`, get rid of any other state_keys - # for this `state_type` since the wildcard will cover it already. - if state_key == StateValues.WILDCARD: - required_state_map[state_type] = {state_key} - # Otherwise, just add it to the set - else: - if required_state_map.get(state_type) is None: - required_state_map[state_type] = {state_key} - else: - required_state_map[state_type].add(state_key) - - return cls( - timeline_limit=room_params.timeline_limit, - required_state_map=required_state_map, - ) - - def deep_copy(self) -> "RoomSyncConfig": - required_state_map: Dict[str, Set[str]] = { - state_type: state_key_set.copy() - for state_type, state_key_set in self.required_state_map.items() - } - - return RoomSyncConfig( - timeline_limit=self.timeline_limit, - required_state_map=required_state_map, - ) - - def combine_room_sync_config( - self, other_room_sync_config: "RoomSyncConfig" - ) -> None: - """ - Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the - superset union of the two. 
- """ - # Take the highest timeline limit - if self.timeline_limit < other_room_sync_config.timeline_limit: - self.timeline_limit = other_room_sync_config.timeline_limit - - # Union the required state - for ( - state_type, - state_key_set, - ) in other_room_sync_config.required_state_map.items(): - # If we already have a wildcard for everything, we don't need to add - # anything else - if StateValues.WILDCARD in self.required_state_map.get( - StateValues.WILDCARD, set() - ): - break - - # If we already have a wildcard `state_key` for this `state_type`, we don't need - # to add anything else - if StateValues.WILDCARD in self.required_state_map.get(state_type, set()): - continue - - # If we're getting wildcards for the `state_type` and `state_key`, that's - # all that matters so get rid of any other entries - if ( - state_type == StateValues.WILDCARD - and StateValues.WILDCARD in state_key_set - ): - self.required_state_map = {state_type: {StateValues.WILDCARD}} - # We can break, since we don't need to add anything else - break - - for state_key in state_key_set: - # If we already have a wildcard for this specific `state_key`, we don't need - # to add it since the wildcard already covers it. - if state_key in self.required_state_map.get( - StateValues.WILDCARD, set() - ): - continue - - # If we're getting a wildcard for the `state_type`, get rid of any other - # entries with the same `state_key`, since the wildcard will cover it already. - if state_type == StateValues.WILDCARD: - # Get rid of any entries that match the `state_key` - # - # Make a copy so we don't run into an error: `dictionary changed size - # during iteration`, when we remove items - for existing_state_type, existing_state_key_set in list( - self.required_state_map.items() - ): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for existing_state_key in existing_state_key_set.copy(): - if existing_state_key == state_key: - existing_state_key_set.remove(state_key) - - # If we've the left the `set()` empty, remove it from the map - if existing_state_key_set == set(): - self.required_state_map.pop(existing_state_type, None) - - # If we're getting a wildcard `state_key`, get rid of any other state_keys - # for this `state_type` since the wildcard will cover it already. - if state_key == StateValues.WILDCARD: - self.required_state_map[state_type] = {state_key} - break - # Otherwise, just add it to the set - else: - if self.required_state_map.get(state_type) is None: - self.required_state_map[state_type] = {state_key} - else: - self.required_state_map[state_type].add(state_key) - - def must_await_full_state( - self, - is_mine_id: Callable[[str], bool], - ) -> bool: - """ - Check if we have a we're only requesting `required_state` which is completely - satisfied even with partial state, then we don't need to `await_full_state` before - we can return it. - - Also see `StateFilter.must_await_full_state(...)` for comparison - - Partially-stated rooms should have all state events except for remote membership - events so if we require a remote membership event anywhere, then we need to - return `True` (requires full state). 
- - Args: - is_mine_id: a callable which confirms if a given state_key matches a mxid - of a local user - """ - wildcard_state_keys = self.required_state_map.get(StateValues.WILDCARD) - # Requesting *all* state in the room so we have to wait - if ( - wildcard_state_keys is not None - and StateValues.WILDCARD in wildcard_state_keys - ): - return True - - # If the wildcards don't refer to remote user IDs, then we don't need to wait - # for full state. - if wildcard_state_keys is not None: - for possible_user_id in wildcard_state_keys: - if not possible_user_id[0].startswith(UserID.SIGIL): - # Not a user ID - continue - - localpart_hostname = possible_user_id.split(":", 1) - if len(localpart_hostname) < 2: - # Not a user ID - continue - - if not is_mine_id(possible_user_id): - return True - - membership_state_keys = self.required_state_map.get(EventTypes.Member) - # We aren't requesting any membership events at all so the partial state will - # cover us. - if membership_state_keys is None: - return False - - # If we're requesting entirely local users, the partial state will cover us. - for user_id in membership_state_keys: - if user_id == StateValues.ME: - continue - # We're lazy-loading membership so we can just return the state we have. - # Lazy-loading means we include membership for any event `sender` in the - # timeline but since we had to auth those timeline events, we will have the - # membership state for them (including from remote senders). - elif user_id == StateValues.LAZY: - continue - elif user_id == StateValues.WILDCARD: - return False - elif not is_mine_id(user_id): - return True - - # Local users only so the partial state will cover us. - return False - - -class StateValues: - """ - Understood values of the (type, state_key) tuple in `required_state`. - """ - - # Include all state events of the given type - WILDCARD: Final = "*" - # Lazy-load room membership events (include room membership events for any event - # `sender` in the timeline). We only give special meaning to this value when it's a - # `state_key`. - LAZY: Final = "$LAZY" - # Subsitute with the requester's user ID. Typically used by clients to get - # the user's membership. 
- ME: Final = "$ME" - - class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @@ -475,12 +203,11 @@ class SlidingSyncHandler: self.notifier = hs.get_notifier() self.event_sources = hs.get_event_sources() self.relations_handler = hs.get_relations_handler() - self.device_handler = hs.get_device_handler() - self.push_rules_handler = hs.get_push_rules_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.is_mine_id = hs.is_mine_id self.connection_store = SlidingSyncConnectionStore() + self.extensions = SlidingSyncExtensionHandler(hs) async def wait_for_sync_for_user( self, @@ -868,7 +595,7 @@ class SlidingSyncHandler: with start_active_span("sliding_sync.generate_room_entries"): await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10) - extensions = await self.get_extensions_response( + extensions = await self.extensions.get_extensions_response( sync_config=sync_config, actual_lists=lists, previous_connection_state=previous_connection_state, @@ -2597,984 +2324,3 @@ class SlidingSyncHandler: notification_count=0, highlight_count=0, ) - - @trace - async def get_extensions_response( - self, - sync_config: SlidingSyncConfig, - previous_connection_state: "PerConnectionState", - new_connection_state: "MutablePerConnectionState", - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> SlidingSyncResult.Extensions: - """Handle extension requests. - - Args: - sync_config: Sync configuration - new_connection_state: Snapshot of the current per-connection state - new_per_connection_state: A mutable copy of the per-connection - state, used to record updates to the state during this request. - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - - if sync_config.extensions is None: - return SlidingSyncResult.Extensions() - - to_device_response = None - if sync_config.extensions.to_device is not None: - to_device_response = await self.get_to_device_extension_response( - sync_config=sync_config, - to_device_request=sync_config.extensions.to_device, - to_token=to_token, - ) - - e2ee_response = None - if sync_config.extensions.e2ee is not None: - e2ee_response = await self.get_e2ee_extension_response( - sync_config=sync_config, - e2ee_request=sync_config.extensions.e2ee, - to_token=to_token, - from_token=from_token, - ) - - account_data_response = None - if sync_config.extensions.account_data is not None: - account_data_response = await self.get_account_data_extension_response( - sync_config=sync_config, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - account_data_request=sync_config.extensions.account_data, - to_token=to_token, - from_token=from_token, - ) - - receipts_response = None - if sync_config.extensions.receipts is not None: - receipts_response = await self.get_receipts_extension_response( - sync_config=sync_config, - previous_connection_state=previous_connection_state, - new_connection_state=new_connection_state, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - actual_room_response_map=actual_room_response_map, - receipts_request=sync_config.extensions.receipts, - to_token=to_token, - from_token=from_token, - ) - - typing_response = None - if sync_config.extensions.typing is not None: - typing_response = await self.get_typing_extension_response( - sync_config=sync_config, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - actual_room_response_map=actual_room_response_map, - typing_request=sync_config.extensions.typing, - to_token=to_token, - from_token=from_token, - ) - - return SlidingSyncResult.Extensions( - to_device=to_device_response, - e2ee=e2ee_response, - account_data=account_data_response, - receipts=receipts_response, - typing=typing_response, - ) - - def find_relevant_room_ids_for_extension( - self, - requested_lists: Optional[List[str]], - requested_room_ids: Optional[List[str]], - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - ) -> Set[str]: - """ - Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only - return results for rooms in the Sliding Sync response. This matches up the - requested rooms/lists with the actual lists/rooms in the Sliding Sync response. - - {"lists": []} // Do not process any lists. - {"lists": ["rooms", "dms"]} // Process only a subset of lists. - {"lists": ["*"]} // Process all lists defined in the Sliding Window API. (This is the default.) - - {"rooms": []} // Do not process any specific rooms. - {"rooms": ["!a:b", "!c:d"]} // Process only a subset of room subscriptions. - {"rooms": ["*"]} // Process all room subscriptions defined in the Room Subscription API. (This is the default.) - - Args: - requested_lists: The `lists` from the extension request. - requested_room_ids: The `rooms` from the extension request. - actual_lists: The actual lists from the Sliding Sync response. - actual_room_ids: The actual room subscriptions from the Sliding Sync request. - """ - - # We only want to include account data for rooms that are already in the sliding - # sync response AND that were requested in the account data request. 
- relevant_room_ids: Set[str] = set() - - # See what rooms from the room subscriptions we should get account data for - if requested_room_ids is not None: - for room_id in requested_room_ids: - # A wildcard means we process all rooms from the room subscriptions - if room_id == "*": - relevant_room_ids.update(actual_room_ids) - break - - if room_id in actual_room_ids: - relevant_room_ids.add(room_id) - - # See what rooms from the sliding window lists we should get account data for - if requested_lists is not None: - for list_key in requested_lists: - # Just some typing because we share the variable name in multiple places - actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None - - # A wildcard means we process rooms from all lists - if list_key == "*": - for actual_list in actual_lists.values(): - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - - break - - actual_list = actual_lists.get(list_key) - if actual_list is not None: - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - - return relevant_room_ids - - @trace - async def get_to_device_extension_response( - self, - sync_config: SlidingSyncConfig, - to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, - to_token: StreamToken, - ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: - """Handle to-device extension (MSC3885) - - Args: - sync_config: Sync configuration - to_device_request: The to-device extension from the request - to_token: The point in the stream to sync up to. - """ - user_id = sync_config.user.to_string() - device_id = sync_config.requester.device_id - - # Skip if the extension is not enabled - if not to_device_request.enabled: - return None - - # Check that this request has a valid device ID (not all requests have - # to belong to a device, and so device_id is None) - if device_id is None: - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=f"{to_token.to_device_key}", - events=[], - ) - - since_stream_id = 0 - if to_device_request.since is not None: - # We've already validated this is an int. - since_stream_id = int(to_device_request.since) - - if to_token.to_device_key < since_stream_id: - # The since token is ahead of our current token, so we return an - # empty response. - logger.warning( - "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r", - since_stream_id, - to_token.to_device_key, - ) - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=to_device_request.since, - events=[], - ) - - # Delete everything before the given since token, as we know the - # device must have received them. 
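Before the deletion call that follows, it may help to see this acknowledgement pattern in isolation: the client echoes `next_batch` back as `since`, which doubles as an ack that lets the server prune delivered messages. A toy, in-memory sketch (purely illustrative, not the real storage layer):

    from typing import List, Tuple

    class ToyToDeviceInbox:
        """Illustrative only: stream IDs are monotonically increasing ints."""

        def __init__(self) -> None:
            self._messages: List[Tuple[int, str]] = []
            self._next_id = 1

        def put(self, payload: str) -> None:
            self._messages.append((self._next_id, payload))
            self._next_id += 1

        def sync(self, since: int, limit: int = 100) -> Tuple[List[str], int]:
            # Everything at or below `since` was already delivered: prune it,
            # mirroring the delete_messages_for_device call below.
            self._messages = [(i, p) for i, p in self._messages if i > since]
            batch = self._messages[:limit]
            next_batch = batch[-1][0] if batch else since
            return [p for _, p in batch], next_batch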
- deleted = await self.store.delete_messages_for_device( - user_id=user_id, - device_id=device_id, - up_to_stream_id=since_stream_id, - ) - - logger.debug( - "Deleted %d to-device messages up to %d for %s", - deleted, - since_stream_id, - user_id, - ) - - messages, stream_id = await self.store.get_messages_for_device( - user_id=user_id, - device_id=device_id, - from_stream_id=since_stream_id, - to_stream_id=to_token.to_device_key, - limit=min(to_device_request.limit, 100), # Limit to at most 100 events - ) - - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=f"{stream_id}", - events=messages, - ) - - @trace - async def get_e2ee_extension_response( - self, - sync_config: SlidingSyncConfig, - e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: - """Handle E2EE device extension (MSC3884) - - Args: - sync_config: Sync configuration - e2ee_request: The e2ee extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. - """ - user_id = sync_config.user.to_string() - device_id = sync_config.requester.device_id - - # Skip if the extension is not enabled - if not e2ee_request.enabled: - return None - - device_list_updates: Optional[DeviceListUpdates] = None - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - device_list_updates = await self.device_handler.get_user_ids_changed( - user_id=user_id, - from_token=from_token.stream_token, - ) - - device_one_time_keys_count: Mapping[str, int] = {} - device_unused_fallback_key_types: Sequence[str] = [] - if device_id: - # TODO: We should have a way to let clients differentiate between the states of: - # * no change in OTK count since the provided since token - # * the server has zero OTKs left for this device - # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 - device_one_time_keys_count = await self.store.count_e2e_one_time_keys( - user_id, device_id - ) - device_unused_fallback_key_types = ( - await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) - ) - - return SlidingSyncResult.Extensions.E2eeExtension( - device_list_updates=device_list_updates, - device_one_time_keys_count=device_one_time_keys_count, - device_unused_fallback_key_types=device_unused_fallback_key_types, - ) - - @trace - async def get_account_data_extension_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]: - """Handle Account Data extension (MSC3959) - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - user_id = sync_config.user.to_string() - - # Skip if the extension is not enabled - if not account_data_request.enabled: - return None - - global_account_data_map: Mapping[str, JsonMapping] = {} - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - global_account_data_map = ( - await self.store.get_updated_global_account_data_for_user( - user_id, from_token.stream_token.account_data_key - ) - ) - - have_push_rules_changed = await self.store.have_push_rules_changed_for_user( - user_id, from_token.stream_token.push_rules_key - ) - if have_push_rules_changed: - global_account_data_map = dict(global_account_data_map) - # TODO: This should take into account the `from_token` and `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) - else: - # TODO: This should take into account the `to_token` - all_global_account_data = await self.store.get_global_account_data_for_user( - user_id - ) - - global_account_data_map = dict(all_global_account_data) - # TODO: This should take into account the `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) - - # Fetch room account data - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {} - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=account_data_request.lists, - requested_room_ids=account_data_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - if len(relevant_room_ids) > 0: - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - account_data_by_room_map = ( - await self.store.get_updated_room_account_data_for_user( - user_id, from_token.stream_token.account_data_key - ) - ) - else: - # TODO: This should take into account the `to_token` - account_data_by_room_map = ( - await self.store.get_room_account_data_for_user(user_id) - ) - - # Filter down to the relevant rooms - account_data_by_room_map = { - room_id: account_data_map - for room_id, account_data_map in account_data_by_room_map.items() - if room_id in relevant_room_ids - } - - return SlidingSyncResult.Extensions.AccountDataExtension( - global_account_data_map=global_account_data_map, - account_data_by_room_map=account_data_by_room_map, - ) - - @trace - async def get_receipts_extension_response( - self, - sync_config: SlidingSyncConfig, - previous_connection_state: "PerConnectionState", - new_connection_state: "MutablePerConnectionState", - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]: - """Handle Receipts extension (MSC3960) - - Args: - sync_config: Sync configuration - previous_connection_state: The current per-connection state - new_connection_state: A mutable copy of the per-connection - state, used to record updates to the state. - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. 
- account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. - """ - # Skip if the extension is not enabled - if not receipts_request.enabled: - return None - - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=receipts_request.lists, - requested_room_ids=receipts_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - - room_id_to_receipt_map: Dict[str, JsonMapping] = {} - if len(relevant_room_ids) > 0: - # We need to handle the different cases depending on if we have sent - # down receipts previously or not, so we split the relevant rooms - # up into different collections based on status. - live_rooms = set() - previously_rooms: Dict[str, MultiWriterStreamToken] = {} - initial_rooms = set() - - for room_id in relevant_room_ids: - if not from_token: - initial_rooms.add(room_id) - continue - - # If we're sending down the room from scratch again for some reason, we - # should always resend the receipts as well (regardless of if - # we've sent them down before). This is to mimic the behaviour - # of what happens on initial sync, where you get a chunk of - # timeline with all of the corresponding receipts for the events in the timeline. - room_result = actual_room_response_map.get(room_id) - if room_result is not None and room_result.initial: - initial_rooms.add(room_id) - continue - - room_status = previous_connection_state.receipts.have_sent_room(room_id) - if room_status.status == HaveSentRoomFlag.LIVE: - live_rooms.add(room_id) - elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: - assert room_status.last_token is not None - previously_rooms[room_id] = room_status.last_token - elif room_status.status == HaveSentRoomFlag.NEVER: - initial_rooms.add(room_id) - else: - assert_never(room_status.status) - - # The set of receipts that we fetched. Private receipts need to be - # filtered out before returning. - fetched_receipts = [] - - # For live rooms we just fetch all receipts in those rooms since the - # `since` token. - if live_rooms: - assert from_token is not None - receipts = await self.store.get_linearized_receipts_for_rooms( - room_ids=live_rooms, - from_key=from_token.stream_token.receipt_key, - to_key=to_token.receipt_key, - ) - fetched_receipts.extend(receipts) - - # For rooms we've previously sent down, but aren't up to date, we - # need to use the from token from the room status. - if previously_rooms: - for room_id, receipt_token in previously_rooms.items(): - # TODO: Limit the number of receipts we're about to send down - # for the room, if its too many we should TODO - previously_receipts = ( - await self.store.get_linearized_receipts_for_room( - room_id=room_id, - from_key=receipt_token, - to_key=to_token.receipt_key, - ) - ) - fetched_receipts.extend(previously_receipts) - - # For rooms we haven't previously sent down, we could send all receipts - # from that room but we only want to include receipts for events - # in the timeline to avoid bloating and blowing up the sync response - # as the number of users in the room increases. 
(this behavior is part of the spec) - initial_rooms_and_event_ids = [ - (room_id, event.event_id) - for room_id in initial_rooms - if room_id in actual_room_response_map - for event in actual_room_response_map[room_id].timeline_events - ] - if initial_rooms_and_event_ids: - initial_receipts = await self.store.get_linearized_receipts_for_events( - room_and_event_ids=initial_rooms_and_event_ids, - ) - fetched_receipts.extend(initial_receipts) - - fetched_receipts = ReceiptEventSource.filter_out_private_receipts( - fetched_receipts, sync_config.user.to_string() - ) - - for receipt in fetched_receipts: - # These fields should exist for every receipt - room_id = receipt["room_id"] - type = receipt["type"] - content = receipt["content"] - - room_id_to_receipt_map[room_id] = {"type": type, "content": content} - - # Now we update the per-connection state to track which receipts we have - # and haven't sent down. - new_connection_state.receipts.record_sent_rooms(relevant_room_ids) - - if from_token: - # Now find the set of rooms that may have receipts that we're not sending - # down. We only need to check rooms that we have previously returned - # receipts for (in `previous_connection_state`) because we only care about - # updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just - # stay pointing at their previous position so we don't need to waste time - # checking those and since we default to `NEVER`, rooms that were `NEVER` - # sent before don't need to be recorded as we'll handle them correctly when - # they come into range for the first time. - rooms_no_receipts = [ - room_id - for room_id, room_status in previous_connection_state.receipts._statuses.items() - if room_status.status == HaveSentRoomFlag.LIVE - and room_id not in relevant_room_ids - ] - changed_rooms = await self.store.get_rooms_with_receipts_between( - rooms_no_receipts, - from_key=from_token.stream_token.receipt_key, - to_key=to_token.receipt_key, - ) - new_connection_state.receipts.record_unsent_rooms( - changed_rooms, from_token.stream_token.receipt_key - ) - - return SlidingSyncResult.Extensions.ReceiptsExtension( - room_id_to_receipt_map=room_id_to_receipt_map, - ) - - async def get_typing_extension_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - typing_request: SlidingSyncConfig.Extensions.TypingExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]: - """Handle Typing Notification extension (MSC3961) - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. - account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - # Skip if the extension is not enabled - if not typing_request.enabled: - return None - - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=typing_request.lists, - requested_room_ids=typing_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - - room_id_to_typing_map: Dict[str, JsonMapping] = {} - if len(relevant_room_ids) > 0: - # Note: We don't need to take connection tracking into account for typing - # notifications because they'll get anything still relevant and hasn't timed - # out when the room comes into range. We consider the gap where the room - # fell out of range, as long enough for any typing notifications to have - # timed out (it's not worth the 30 seconds of data we may have missed). - typing_source = self.event_sources.sources.typing - typing_notifications, _ = await typing_source.get_new_events( - user=sync_config.user, - from_key=(from_token.stream_token.typing_key if from_token else 0), - to_key=to_token.typing_key, - # This is a dummy value and isn't used in the function - limit=0, - room_ids=relevant_room_ids, - is_guest=False, - ) - - for typing_notification in typing_notifications: - # These fields should exist for every typing notification - room_id = typing_notification["room_id"] - type = typing_notification["type"] - content = typing_notification["content"] - - room_id_to_typing_map[room_id] = {"type": type, "content": content} - - return SlidingSyncResult.Extensions.TypingExtension( - room_id_to_typing_map=room_id_to_typing_map, - ) - - -class HaveSentRoomFlag(Enum): - """Flag for whether we have sent the room down a sliding sync connection. - - The valid state changes here are: - NEVER -> LIVE - LIVE -> PREVIOUSLY - PREVIOUSLY -> LIVE - """ - - # The room has never been sent down (or we have forgotten we have sent it - # down). - NEVER = 1 - - # We have previously sent the room down, but there are updates that we - # haven't sent down. - PREVIOUSLY = 2 - - # We have sent the room down and the client has received all updates. - LIVE = 3 - - -T = TypeVar("T") - - -@attr.s(auto_attribs=True, slots=True, frozen=True) -class HaveSentRoom(Generic[T]): - """Whether we have sent the room data down a sliding sync connection. - - We are generic over the type of token used, e.g. `RoomStreamToken` or - `MultiWriterStreamToken`. - - Attributes: - status: Flag of if we have or haven't sent down the room - last_token: If the flag is `PREVIOUSLY` then this is non-null and - contains the last stream token of the last updates we sent down - the room, i.e. we still need to send everything since then to the - client. - """ - - status: HaveSentRoomFlag - last_token: Optional[T] - - @staticmethod - def live() -> "HaveSentRoom[T]": - return HaveSentRoom(HaveSentRoomFlag.LIVE, None) - - @staticmethod - def previously(last_token: T) -> "HaveSentRoom[T]": - """Constructor for `PREVIOUSLY` flag.""" - return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token) - - @staticmethod - def never() -> "HaveSentRoom[T]": - return HaveSentRoom(HaveSentRoomFlag.NEVER, None) - - -@attr.s(auto_attribs=True, slots=True, frozen=True) -class RoomStatusMap(Generic[T]): - """For a given stream, e.g. 
events, records what we have or have not sent - down for that stream in a given room.""" - - # `room_id` -> `HaveSentRoom` - _statuses: Mapping[str, HaveSentRoom[T]] = attr.Factory(dict) - - def have_sent_room(self, room_id: str) -> HaveSentRoom[T]: - """Return whether we have previously sent the room down""" - return self._statuses.get(room_id, HaveSentRoom.never()) - - def get_mutable(self) -> "MutableRoomStatusMap[T]": - """Get a mutable copy of this state.""" - return MutableRoomStatusMap( - statuses=self._statuses, - ) - - def copy(self) -> "RoomStatusMap[T]": - """Make a copy of the class. Useful for converting from a mutable to - immutable version.""" - - return RoomStatusMap(statuses=dict(self._statuses)) - - -class MutableRoomStatusMap(RoomStatusMap[T]): - """A mutable version of `RoomStatusMap`""" - - # We use a ChainMap here so that we can easily track what has been updated - # and what hasn't. Note that when we persist the per connection state this - # will get flattened to a normal dict (via calling `.copy()`) - _statuses: typing.ChainMap[str, HaveSentRoom[T]] - - def __init__( - self, - statuses: Mapping[str, HaveSentRoom[T]], - ) -> None: - # ChainMap requires a mutable mapping, but we're not actually going to - # mutate it. - statuses = cast(MutableMapping, statuses) - - super().__init__( - statuses=ChainMap({}, statuses), - ) - - def get_updates(self) -> Mapping[str, HaveSentRoom[T]]: - """Return only the changes that were made""" - return self._statuses.maps[0] - - def record_sent_rooms(self, room_ids: StrCollection) -> None: - """Record that we have sent these rooms in the response""" - for room_id in room_ids: - current_status = self._statuses.get(room_id, HaveSentRoom.never()) - if current_status.status == HaveSentRoomFlag.LIVE: - continue - - self._statuses[room_id] = HaveSentRoom.live() - - def record_unsent_rooms(self, room_ids: StrCollection, from_token: T) -> None: - """Record that we have not sent these rooms in the response, but there - have been updates. - """ - # Whether we add/update the entries for unsent rooms depends on the - # existing entry: - # - LIVE: We have previously sent down everything up to - # `last_room_token, so we update the entry to be `PREVIOUSLY` with - # `last_room_token`. - # - PREVIOUSLY: We have previously sent down everything up to *a* - # given token, so we don't need to update the entry. - # - NEVER: We have never previously sent down the room, and we haven't - # sent anything down this time either so we leave it as NEVER. - - for room_id in room_ids: - current_status = self._statuses.get(room_id, HaveSentRoom.never()) - if current_status.status != HaveSentRoomFlag.LIVE: - continue - - self._statuses[room_id] = HaveSentRoom.previously(from_token) - - -@attr.s(auto_attribs=True) -class PerConnectionState: - """The per-connection state. A snapshot of what we've sent down the - connection before. - - Currently, we track whether we've sent down various aspects of a given room - before. - - We use the `rooms` field to store the position in the events stream for each - room that we've previously sent to the client before. On the next request - that includes the room, we can then send only what's changed since that - recorded position. - - Same goes for the `receipts` field so we only need to send the new receipts - since the last time you made a sync request. - - Attributes: - rooms: The status of each room for the events stream. - receipts: The status of each room for the receipts stream. 
- room_configs: Map from room_id to the `RoomSyncConfig` of all - rooms that we have previously sent down. - """ - - rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) - receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) - - room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict) - - def get_mutable(self) -> "MutablePerConnectionState": - """Get a mutable copy of this state.""" - room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) - - return MutablePerConnectionState( - rooms=self.rooms.get_mutable(), - receipts=self.receipts.get_mutable(), - room_configs=ChainMap({}, room_configs), - ) - - def copy(self) -> "PerConnectionState": - return PerConnectionState( - rooms=self.rooms.copy(), - receipts=self.receipts.copy(), - room_configs=dict(self.room_configs), - ) - - -@attr.s(auto_attribs=True) -class MutablePerConnectionState(PerConnectionState): - """A mutable version of `PerConnectionState`""" - - rooms: MutableRoomStatusMap[RoomStreamToken] - receipts: MutableRoomStatusMap[MultiWriterStreamToken] - - room_configs: typing.ChainMap[str, RoomSyncConfig] - - def has_updates(self) -> bool: - return ( - bool(self.rooms.get_updates()) - or bool(self.receipts.get_updates()) - or bool(self.get_room_config_updates()) - ) - - def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]: - """Get updates to the room sync config""" - return self.room_configs.maps[0] - - -@attr.s(auto_attribs=True) -class SlidingSyncConnectionStore: - """In-memory store of per-connection state, including what rooms we have - previously sent down a sliding sync connection. - - Note: This is NOT safe to run in a worker setup because connection positions will - point to different sets of rooms on different workers. e.g. for the same connection, - a connection position of 5 might have totally different states on worker A and - worker B. - - One complication that we need to deal with here is needing to handle requests being - resent, i.e. if we sent down a room in a response that the client received, we must - consider the room *not* sent when we get the request again. - - This is handled by using an integer "token", which is returned to the client - as part of the sync token. For each connection we store a mapping from - tokens to the room states, and create a new entry when we send down new - rooms. - - Note that for any given sliding sync connection we will only store a maximum - of two different tokens: the previous token from the request and a new token - sent in the response. When we receive a request with a given token, we then - clear out all other entries with a different token. - - Attributes: - _connections: Mapping from `(user_id, conn_id)` to mapping of `token` - to mapping of room ID to `HaveSentRoom`. - """ - - # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState` - _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory( - dict - ) - - async def is_valid_token( - self, sync_config: SlidingSyncConfig, connection_token: int - ) -> bool: - """Return whether the connection token is valid/recognized""" - if connection_token == 0: - return True - - conn_key = self._get_connection_key(sync_config) - return connection_token in self._connections.get(conn_key, {}) - - async def get_per_connection_state( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - ) -> PerConnectionState: - """Fetch the per-connection state for the token. 
- - Raises: - SlidingSyncUnknownPosition if the connection_token is unknown - """ - if from_token is None: - return PerConnectionState() - - connection_position = from_token.connection_position - if connection_position == 0: - # Initial sync (request without a `from_token`) starts at `0` so - # there is no existing per-connection state - return PerConnectionState() - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.get(conn_key, {}) - connection_state = sync_statuses.get(connection_position) - - if connection_state is None: - raise SlidingSyncUnknownPosition() - - return connection_state - - @trace - async def record_new_state( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - new_connection_state: MutablePerConnectionState, - ) -> int: - """Record updated per-connection state, returning the connection - position associated with the new state. - If there are no changes to the state this may return the same token as - the existing per-connection state. - """ - prev_connection_token = 0 - if from_token is not None: - prev_connection_token = from_token.connection_position - - if not new_connection_state.has_updates(): - return prev_connection_token - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) - - # Generate a new token, removing any existing entries in that token - # (which can happen if requests get resent). - new_store_token = prev_connection_token + 1 - sync_statuses.pop(new_store_token, None) - - # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s - # don't grow forever. - sync_statuses[new_store_token] = new_connection_state.copy() - - return new_store_token - - @trace - async def mark_token_seen( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - ) -> None: - """We have received a request with the given token, so we can clear out - any other tokens associated with the connection. - - If there is no from token then we have started afresh, and so we delete - all tokens associated with the device. - """ - # Clear out any tokens for the connection that doesn't match the one - # from the request. - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.pop(conn_key, {}) - if from_token is None: - return - - sync_statuses = { - connection_token: room_statuses - for connection_token, room_statuses in sync_statuses.items() - if connection_token == from_token.connection_position - } - if sync_statuses: - self._connections[conn_key] = sync_statuses - - @staticmethod - def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: - """Return a unique identifier for this connection. - - The first part is simply the user ID. - - The second part is generally a combination of device ID and conn_id. - However, both these two are optional (e.g. puppet access tokens don't - have device IDs), so this handles those edge cases. - - We use this over the raw `conn_id` to avoid clashes between different - clients that use the same `conn_id`. Imagine a user uses a web client - that uses `conn_id: main_sync_loop` and an Android client that also has - a `conn_id: main_sync_loop`. - """ - - user_id = sync_config.user.to_string() - - # Only one sliding sync connection is allowed per given conn_id (empty - # or not). 
-        conn_id = sync_config.conn_id or ""
-
-        if sync_config.requester.device_id:
-            return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}")
-
-        if sync_config.requester.access_token_id:
-            # If we don't have a device, then the access token ID should be a
-            # stable ID.
-            return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}")
-
-        # If we have neither then its likely an AS or some weird token. Either
-        # way we can just fail here.
-        raise Exception("Cannot use sliding sync with access token type")
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py
new file mode 100644
index 0000000000..599c74429e
--- /dev/null
+++ b/synapse/handlers/sliding_sync/extensions.py
@@ -0,0 +1,660 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Sequence, Set
+
+from typing_extensions import assert_never
+
+from synapse.api.constants import AccountDataTypes
+from synapse.handlers.receipts import ReceiptEventSource
+from synapse.handlers.sliding_sync.types import (
+    HaveSentRoomFlag,
+    MutablePerConnectionState,
+    PerConnectionState,
+)
+from synapse.logging.opentracing import trace
+from synapse.types import (
+    DeviceListUpdates,
+    JsonMapping,
+    MultiWriterStreamToken,
+    SlidingSyncStreamToken,
+    StreamToken,
+)
+from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncExtensionHandler:
+    """Handles the extensions to sliding sync."""
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.device_handler = hs.get_device_handler()
+        self.push_rules_handler = hs.get_push_rules_handler()
+
+    @trace
+    async def get_extensions_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> SlidingSyncResult.Extensions:
+        """Handle extension requests.
+
+        Args:
+            sync_config: Sync configuration
+            previous_connection_state: Snapshot of the current per-connection state
+            new_connection_state: A mutable copy of the per-connection
+                state, used to record updates to the state during this request.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+
+        if sync_config.extensions is None:
+            return SlidingSyncResult.Extensions()
+
+        to_device_response = None
+        if sync_config.extensions.to_device is not None:
+            to_device_response = await self.get_to_device_extension_response(
+                sync_config=sync_config,
+                to_device_request=sync_config.extensions.to_device,
+                to_token=to_token,
+            )
+
+        e2ee_response = None
+        if sync_config.extensions.e2ee is not None:
+            e2ee_response = await self.get_e2ee_extension_response(
+                sync_config=sync_config,
+                e2ee_request=sync_config.extensions.e2ee,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        account_data_response = None
+        if sync_config.extensions.account_data is not None:
+            account_data_response = await self.get_account_data_extension_response(
+                sync_config=sync_config,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                account_data_request=sync_config.extensions.account_data,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        receipts_response = None
+        if sync_config.extensions.receipts is not None:
+            receipts_response = await self.get_receipts_extension_response(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                actual_room_response_map=actual_room_response_map,
+                receipts_request=sync_config.extensions.receipts,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        typing_response = None
+        if sync_config.extensions.typing is not None:
+            typing_response = await self.get_typing_extension_response(
+                sync_config=sync_config,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                actual_room_response_map=actual_room_response_map,
+                typing_request=sync_config.extensions.typing,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        return SlidingSyncResult.Extensions(
+            to_device=to_device_response,
+            e2ee=e2ee_response,
+            account_data=account_data_response,
+            receipts=receipts_response,
+            typing=typing_response,
+        )
+
+    def find_relevant_room_ids_for_extension(
+        self,
+        requested_lists: Optional[List[str]],
+        requested_room_ids: Optional[List[str]],
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+    ) -> Set[str]:
+        """
+        Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only
+        return results for rooms in the Sliding Sync response. This matches up the
+        requested rooms/lists with the actual lists/rooms in the Sliding Sync response.
+
+        {"lists": []}                // Do not process any lists.
+        {"lists": ["rooms", "dms"]}  // Process only a subset of lists.
+        {"lists": ["*"]}             // Process all lists defined in the Sliding Window API. (This is the default.)
+
+        {"rooms": []}                // Do not process any specific rooms.
+        {"rooms": ["!a:b", "!c:d"]}  // Process only a subset of room subscriptions.
+        {"rooms": ["*"]}             // Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+
+        Args:
+            requested_lists: The `lists` from the extension request.
+            requested_room_ids: The `rooms` from the extension request.
+            actual_lists: The actual lists from the Sliding Sync response.
+            actual_room_ids: The actual room subscriptions from the Sliding Sync request.
+        """
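As a rough illustration of the wildcard semantics documented in the docstring above, the matching can be reduced to a standalone sketch over plain sets. This is an aside for the reader, not part of the patch; `resolve_relevant_rooms` and all of its inputs are invented for illustration.

    from typing import Dict, List, Optional, Set

    def resolve_relevant_rooms(
        requested_lists: Optional[List[str]],
        requested_room_ids: Optional[List[str]],
        actual_lists: Dict[str, Set[str]],  # list key -> room IDs in the response
        actual_room_ids: Set[str],  # room subscriptions in the response
    ) -> Set[str]:
        relevant: Set[str] = set()
        # Mirroring the handler below, a missing (`None`) key contributes nothing;
        # an explicit empty list also matches no rooms.
        if requested_room_ids is not None:
            if "*" in requested_room_ids:
                relevant |= actual_room_ids
            else:
                relevant |= actual_room_ids & set(requested_room_ids)
        if requested_lists is not None:
            if "*" in requested_lists:
                for rooms in actual_lists.values():
                    relevant |= rooms
            else:
                for list_key in requested_lists:
                    relevant |= actual_lists.get(list_key, set())
        return relevant

    # {"lists": ["*"]} pulls rooms from every sliding window list, while naming
    # list keys only matches those lists; rooms outside the response never match.
    assert resolve_relevant_rooms(
        ["*"], None, {"rooms": {"!a:x"}, "dms": {"!b:x"}}, set()
    ) == {"!a:x", "!b:x"}
    assert resolve_relevant_rooms(
        ["dms"], ["!c:x"], {"dms": {"!b:x"}}, {"!c:x"}
    ) == {"!b:x", "!c:x"}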
+        # We only want to include account data for rooms that are already in the sliding
+        # sync response AND that were requested in the account data request.
+        relevant_room_ids: Set[str] = set()
+
+        # See what rooms from the room subscriptions we should get account data for
+        if requested_room_ids is not None:
+            for room_id in requested_room_ids:
+                # A wildcard means we process all rooms from the room subscriptions
+                if room_id == "*":
+                    relevant_room_ids.update(actual_room_ids)
+                    break
+
+                if room_id in actual_room_ids:
+                    relevant_room_ids.add(room_id)
+
+        # See what rooms from the sliding window lists we should get account data for
+        if requested_lists is not None:
+            for list_key in requested_lists:
+                # Just some typing because we share the variable name in multiple places
+                actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None
+
+                # A wildcard means we process rooms from all lists
+                if list_key == "*":
+                    for actual_list in actual_lists.values():
+                        # We only expect a single SYNC operation for any list
+                        assert len(actual_list.ops) == 1
+                        sync_op = actual_list.ops[0]
+                        assert sync_op.op == OperationType.SYNC
+
+                        relevant_room_ids.update(sync_op.room_ids)
+
+                    break
+
+                actual_list = actual_lists.get(list_key)
+                if actual_list is not None:
+                    # We only expect a single SYNC operation for any list
+                    assert len(actual_list.ops) == 1
+                    sync_op = actual_list.ops[0]
+                    assert sync_op.op == OperationType.SYNC
+
+                    relevant_room_ids.update(sync_op.room_ids)
+
+        return relevant_room_ids
+
+    @trace
+    async def get_to_device_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension,
+        to_token: StreamToken,
+    ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]:
+        """Handle to-device extension (MSC3885)
+
+        Args:
+            sync_config: Sync configuration
+            to_device_request: The to-device extension from the request
+            to_token: The point in the stream to sync up to.
+        """
+        user_id = sync_config.user.to_string()
+        device_id = sync_config.requester.device_id
+
+        # Skip if the extension is not enabled
+        if not to_device_request.enabled:
+            return None
+
+        # Check that this request has a valid device ID (not all requests have
+        # to belong to a device, and so device_id is None)
+        if device_id is None:
+            return SlidingSyncResult.Extensions.ToDeviceExtension(
+                next_batch=f"{to_token.to_device_key}",
+                events=[],
+            )
+
+        since_stream_id = 0
+        if to_device_request.since is not None:
+            # We've already validated this is an int.
+            since_stream_id = int(to_device_request.since)
+
+        if to_token.to_device_key < since_stream_id:
+            # The since token is ahead of our current token, so we return an
+            # empty response.
+            logger.warning(
+                "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r",
+                since_stream_id,
+                to_token.to_device_key,
+            )
+            return SlidingSyncResult.Extensions.ToDeviceExtension(
+                next_batch=to_device_request.since,
+                events=[],
+            )
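To make the `since`/`next_batch` contract above concrete, here is a rough sketch of the client side of this extension. It is an illustration only: `client.sliding_sync` and `handle` are hypothetical stand-ins, not a real API.

    from typing import Optional

    async def to_device_loop(client, handle) -> None:
        """Drive the to-device extension: echo `next_batch` back as `since`."""
        since: Optional[str] = None
        while True:
            request = {"extensions": {"to_device": {"enabled": True}}}
            if since is not None:
                # Sending the previous `next_batch` back acknowledges delivery;
                # per the handler here, the server then deletes every message up
                # to that stream ID before returning newer ones.
                request["extensions"]["to_device"]["since"] = since
            response = await client.sliding_sync(request)  # hypothetical helper
            ext = response.get("extensions", {}).get("to_device", {})
            for event in ext.get("events", []):
                handle(event)  # hypothetical message handler
            since = ext.get("next_batch", since)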
+        # Delete everything before the given since token, as we know the
+        # device must have received them.
+        deleted = await self.store.delete_messages_for_device(
+            user_id=user_id,
+            device_id=device_id,
+            up_to_stream_id=since_stream_id,
+        )
+
+        logger.debug(
+            "Deleted %d to-device messages up to %d for %s",
+            deleted,
+            since_stream_id,
+            user_id,
+        )
+
+        messages, stream_id = await self.store.get_messages_for_device(
+            user_id=user_id,
+            device_id=device_id,
+            from_stream_id=since_stream_id,
+            to_stream_id=to_token.to_device_key,
+            limit=min(to_device_request.limit, 100),  # Limit to at most 100 events
+        )
+
+        return SlidingSyncResult.Extensions.ToDeviceExtension(
+            next_batch=f"{stream_id}",
+            events=messages,
+        )
+
+    @trace
+    async def get_e2ee_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]:
+        """Handle E2EE device extension (MSC3884)
+
+        Args:
+            sync_config: Sync configuration
+            e2ee_request: The e2ee extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        user_id = sync_config.user.to_string()
+        device_id = sync_config.requester.device_id
+
+        # Skip if the extension is not enabled
+        if not e2ee_request.enabled:
+            return None
+
+        device_list_updates: Optional[DeviceListUpdates] = None
+        if from_token is not None:
+            # TODO: This should take into account the `from_token` and `to_token`
+            device_list_updates = await self.device_handler.get_user_ids_changed(
+                user_id=user_id,
+                from_token=from_token.stream_token,
+            )
+
+        device_one_time_keys_count: Mapping[str, int] = {}
+        device_unused_fallback_key_types: Sequence[str] = []
+        if device_id:
+            # TODO: We should have a way to let clients differentiate between the states of:
+            #   * no change in OTK count since the provided since token
+            #   * the server has zero OTKs left for this device
+            # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+            device_one_time_keys_count = await self.store.count_e2e_one_time_keys(
+                user_id, device_id
+            )
+            device_unused_fallback_key_types = (
+                await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
+            )
+
+        return SlidingSyncResult.Extensions.E2eeExtension(
+            device_list_updates=device_list_updates,
+            device_one_time_keys_count=device_one_time_keys_count,
+            device_unused_fallback_key_types=device_unused_fallback_key_types,
+        )
+
+    @trace
+    async def get_account_data_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]:
+        """Handle Account Data extension (MSC3959)
+
+        Args:
+            sync_config: Sync configuration
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            account_data_request: The account_data extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        user_id = sync_config.user.to_string()
+
+        # Skip if the extension is not enabled
+        if not account_data_request.enabled:
+            return None
+
+        global_account_data_map: Mapping[str, JsonMapping] = {}
+        if from_token is not None:
+            # TODO: This should take into account the `from_token` and `to_token`
+            global_account_data_map = (
+                await self.store.get_updated_global_account_data_for_user(
+                    user_id, from_token.stream_token.account_data_key
+                )
+            )
+
+            have_push_rules_changed = await self.store.have_push_rules_changed_for_user(
+                user_id, from_token.stream_token.push_rules_key
+            )
+            if have_push_rules_changed:
+                global_account_data_map = dict(global_account_data_map)
+                # TODO: This should take into account the `from_token` and `to_token`
+                global_account_data_map[AccountDataTypes.PUSH_RULES] = (
+                    await self.push_rules_handler.push_rules_for_user(sync_config.user)
+                )
+        else:
+            # TODO: This should take into account the `to_token`
+            all_global_account_data = await self.store.get_global_account_data_for_user(
+                user_id
+            )
+
+            global_account_data_map = dict(all_global_account_data)
+            # TODO: This should take into account the `to_token`
+            global_account_data_map[AccountDataTypes.PUSH_RULES] = (
+                await self.push_rules_handler.push_rules_for_user(sync_config.user)
+            )
+
+        # Fetch room account data
+        account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {}
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=account_data_request.lists,
+            requested_room_ids=account_data_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+        if len(relevant_room_ids) > 0:
+            if from_token is not None:
+                # TODO: This should take into account the `from_token` and `to_token`
+                account_data_by_room_map = (
+                    await self.store.get_updated_room_account_data_for_user(
+                        user_id, from_token.stream_token.account_data_key
+                    )
+                )
+            else:
+                # TODO: This should take into account the `to_token`
+                account_data_by_room_map = (
+                    await self.store.get_room_account_data_for_user(user_id)
+                )
+
+            # Filter down to the relevant rooms
+            account_data_by_room_map = {
+                room_id: account_data_map
+                for room_id, account_data_map in account_data_by_room_map.items()
+                if room_id in relevant_room_ids
+            }
+
+        return SlidingSyncResult.Extensions.AccountDataExtension(
+            global_account_data_map=global_account_data_map,
+            account_data_by_room_map=account_data_by_room_map,
+        )
+
+    @trace
+    async def get_receipts_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]:
+        """Handle Receipts extension (MSC3960)
+
+        Args:
+            sync_config: Sync configuration
+            previous_connection_state: The current per-connection state
+            new_connection_state: A mutable copy of the per-connection
+                state, used to record updates to the state.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            receipts_request: The receipts extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not receipts_request.enabled:
+            return None
+
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=receipts_request.lists,
+            requested_room_ids=receipts_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_receipt_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            # We need to handle the different cases depending on if we have sent
+            # down receipts previously or not, so we split the relevant rooms
+            # up into different collections based on status.
+            live_rooms = set()
+            previously_rooms: Dict[str, MultiWriterStreamToken] = {}
+            initial_rooms = set()
+
+            for room_id in relevant_room_ids:
+                if not from_token:
+                    initial_rooms.add(room_id)
+                    continue
+
+                # If we're sending down the room from scratch again for some reason, we
+                # should always resend the receipts as well (regardless of whether
+                # we've sent them down before). This is to mimic the behaviour
+                # of what happens on initial sync, where you get a chunk of
+                # timeline with all of the corresponding receipts for the events in the timeline.
+                room_result = actual_room_response_map.get(room_id)
+                if room_result is not None and room_result.initial:
+                    initial_rooms.add(room_id)
+                    continue
+
+                room_status = previous_connection_state.receipts.have_sent_room(room_id)
+                if room_status.status == HaveSentRoomFlag.LIVE:
+                    live_rooms.add(room_id)
+                elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
+                    assert room_status.last_token is not None
+                    previously_rooms[room_id] = room_status.last_token
+                elif room_status.status == HaveSentRoomFlag.NEVER:
+                    initial_rooms.add(room_id)
+                else:
+                    assert_never(room_status.status)
+
+            # The set of receipts that we fetched. Private receipts need to be
+            # filtered out before returning.
+            fetched_receipts = []
+
+            # For live rooms we just fetch all receipts in those rooms since the
+            # `since` token.
+            if live_rooms:
+                assert from_token is not None
+                receipts = await self.store.get_linearized_receipts_for_rooms(
+                    room_ids=live_rooms,
+                    from_key=from_token.stream_token.receipt_key,
+                    to_key=to_token.receipt_key,
+                )
+                fetched_receipts.extend(receipts)
+
+            # For rooms we've previously sent down, but aren't up to date, we
+            # need to use the from token from the room status.
+            if previously_rooms:
+                for room_id, receipt_token in previously_rooms.items():
+                    # TODO: Limit the number of receipts we're about to send down
+                    # for the room, if it's too many we should TODO
+                    previously_receipts = (
+                        await self.store.get_linearized_receipts_for_room(
+                            room_id=room_id,
+                            from_key=receipt_token,
+                            to_key=to_token.receipt_key,
+                        )
+                    )
+                    fetched_receipts.extend(previously_receipts)
+
+            # For rooms we haven't previously sent down, we could send all receipts
+            # from that room but we only want to include receipts for events
+            # in the timeline to avoid bloating and blowing up the sync response
+            # as the number of users in the room increases.
+            # (this behavior is part of the spec)
+            initial_rooms_and_event_ids = [
+                (room_id, event.event_id)
+                for room_id in initial_rooms
+                if room_id in actual_room_response_map
+                for event in actual_room_response_map[room_id].timeline_events
+            ]
+            if initial_rooms_and_event_ids:
+                initial_receipts = await self.store.get_linearized_receipts_for_events(
+                    room_and_event_ids=initial_rooms_and_event_ids,
+                )
+                fetched_receipts.extend(initial_receipts)
+
+            fetched_receipts = ReceiptEventSource.filter_out_private_receipts(
+                fetched_receipts, sync_config.user.to_string()
+            )
+
+            for receipt in fetched_receipts:
+                # These fields should exist for every receipt
+                room_id = receipt["room_id"]
+                type = receipt["type"]
+                content = receipt["content"]
+
+                room_id_to_receipt_map[room_id] = {"type": type, "content": content}
+
+        # Now we update the per-connection state to track which receipts we have
+        # and haven't sent down.
+        new_connection_state.receipts.record_sent_rooms(relevant_room_ids)
+
+        if from_token:
+            # Now find the set of rooms that may have receipts that we're not sending
+            # down. We only need to check rooms that we have previously returned
+            # receipts for (in `previous_connection_state`) because we only care about
+            # updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just
+            # stay pointing at their previous position so we don't need to waste time
+            # checking those and since we default to `NEVER`, rooms that were `NEVER`
+            # sent before don't need to be recorded as we'll handle them correctly when
+            # they come into range for the first time.
+            rooms_no_receipts = [
+                room_id
+                for room_id, room_status in previous_connection_state.receipts._statuses.items()
+                if room_status.status == HaveSentRoomFlag.LIVE
+                and room_id not in relevant_room_ids
+            ]
+            changed_rooms = await self.store.get_rooms_with_receipts_between(
+                rooms_no_receipts,
+                from_key=from_token.stream_token.receipt_key,
+                to_key=to_token.receipt_key,
+            )
+            new_connection_state.receipts.record_unsent_rooms(
+                changed_rooms, from_token.stream_token.receipt_key
+            )
+
+        return SlidingSyncResult.Extensions.ReceiptsExtension(
+            room_id_to_receipt_map=room_id_to_receipt_map,
+        )
+
+    async def get_typing_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        typing_request: SlidingSyncConfig.Extensions.TypingExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]:
+        """Handle Typing Notification extension (MSC3961)
+
+        Args:
+            sync_config: Sync configuration
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            typing_request: The typing extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not typing_request.enabled:
+            return None
+
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=typing_request.lists,
+            requested_room_ids=typing_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_typing_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            # Note: We don't need to take connection tracking into account for typing
+            # notifications because they'll get anything that is still relevant and
+            # hasn't timed out when the room comes into range. We consider the gap
+            # where the room fell out of range to be long enough for any typing
+            # notifications to have timed out (it's not worth the 30 seconds of data
+            # we may have missed).
+            typing_source = self.event_sources.sources.typing
+            typing_notifications, _ = await typing_source.get_new_events(
+                user=sync_config.user,
+                from_key=(from_token.stream_token.typing_key if from_token else 0),
+                to_key=to_token.typing_key,
+                # This is a dummy value and isn't used in the function
+                limit=0,
+                room_ids=relevant_room_ids,
+                is_guest=False,
+            )
+
+            for typing_notification in typing_notifications:
+                # These fields should exist for every typing notification
+                room_id = typing_notification["room_id"]
+                type = typing_notification["type"]
+                content = typing_notification["content"]
+
+                room_id_to_typing_map[room_id] = {"type": type, "content": content}
+
+        return SlidingSyncResult.Extensions.TypingExtension(
+            room_id_to_typing_map=room_id_to_typing_map,
+        )
diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py
new file mode 100644
index 0000000000..3b727432fb
--- /dev/null
+++ b/synapse/handlers/sliding_sync/store.py
@@ -0,0 +1,200 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, Dict, Optional, Tuple
+
+import attr
+
+from synapse.api.errors import SlidingSyncUnknownPosition
+from synapse.handlers.sliding_sync.types import (
+    MutablePerConnectionState,
+    PerConnectionState,
+)
+from synapse.logging.opentracing import trace
+from synapse.types import SlidingSyncStreamToken
+from synapse.types.handlers import SlidingSyncConfig
+
+if TYPE_CHECKING:
+    pass
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(auto_attribs=True)
+class SlidingSyncConnectionStore:
+    """In-memory store of per-connection state, including what rooms we have
+    previously sent down a sliding sync connection.
+
+    Note: This is NOT safe to run in a worker setup because connection positions will
+    point to different sets of rooms on different workers. e.g. for the same connection,
+    a connection position of 5 might have totally different states on worker A and
+    worker B.
+
+    One complication that we need to deal with here is needing to handle requests being
+    resent, i.e. if we sent down a room in a response that the client received, we must
+    consider the room *not* sent when we get the request again.
+
+    This is handled by using an integer "token", which is returned to the client
+    as part of the sync token.
+    For each connection we store a mapping from
+    tokens to the room states, and create a new entry when we send down new
+    rooms.
+
+    Note that for any given sliding sync connection we will only store a maximum
+    of two different tokens: the previous token from the request and a new token
+    sent in the response. When we receive a request with a given token, we then
+    clear out all other entries with a different token.
+
+    Attributes:
+        _connections: Mapping from `(user_id, conn_id)` to mapping of `token`
+            to mapping of room ID to `HaveSentRoom`.
+    """
+
+    # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState`
+    _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory(
+        dict
+    )
+
+    async def is_valid_token(
+        self, sync_config: SlidingSyncConfig, connection_token: int
+    ) -> bool:
+        """Return whether the connection token is valid/recognized"""
+        if connection_token == 0:
+            return True
+
+        conn_key = self._get_connection_key(sync_config)
+        return connection_token in self._connections.get(conn_key, {})
+
+    async def get_per_connection_state(
+        self,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> PerConnectionState:
+        """Fetch the per-connection state for the token.
+
+        Raises:
+            SlidingSyncUnknownPosition if the connection_token is unknown
+        """
+        if from_token is None:
+            return PerConnectionState()
+
+        connection_position = from_token.connection_position
+        if connection_position == 0:
+            # Initial sync (request without a `from_token`) starts at `0` so
+            # there is no existing per-connection state
+            return PerConnectionState()
+
+        conn_key = self._get_connection_key(sync_config)
+        sync_statuses = self._connections.get(conn_key, {})
+        connection_state = sync_statuses.get(connection_position)
+
+        if connection_state is None:
+            raise SlidingSyncUnknownPosition()
+
+        return connection_state
+
+    @trace
+    async def record_new_state(
+        self,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken],
+        new_connection_state: MutablePerConnectionState,
+    ) -> int:
+        """Record updated per-connection state, returning the connection
+        position associated with the new state. If there are no changes to the
+        state this may return the same token as the existing per-connection state.
+        """
+        prev_connection_token = 0
+        if from_token is not None:
+            prev_connection_token = from_token.connection_position
+
+        if not new_connection_state.has_updates():
+            return prev_connection_token
+
+        conn_key = self._get_connection_key(sync_config)
+        sync_statuses = self._connections.setdefault(conn_key, {})
+
+        # Generate a new token, removing any existing entries in that token
+        # (which can happen if requests get resent).
+        new_store_token = prev_connection_token + 1
+        sync_statuses.pop(new_store_token, None)
+
+        # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s
+        # don't grow forever.
+        sync_statuses[new_store_token] = new_connection_state.copy()
+
+        return new_store_token
+
+    @trace
+    async def mark_token_seen(
+        self,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> None:
+        """We have received a request with the given token, so we can clear out
+        any other tokens associated with the connection.
+
+        If there is no from token then we have started afresh, and so we delete
+        all tokens associated with the device.
+        """
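The interplay of `mark_token_seen` and `record_new_state` is easiest to see with plain dicts. A minimal sketch of the two-position invariant described in the class docstring follows; the stand-in functions and string values are illustrative, not the real `PerConnectionState` machinery.

    def mark_seen(positions: dict, from_position: int) -> dict:
        # Keep only the position the client has proven it received.
        return {p: s for p, s in positions.items() if p == from_position}

    def record_new(positions: dict, from_position: int, new_state: str) -> int:
        # The next position is always previous + 1, so a resent request
        # overwrites the stale entry instead of growing the mapping.
        new_position = from_position + 1
        positions[new_position] = new_state
        return new_position

    positions = {4: "state@4", 5: "state@5"}
    positions = mark_seen(positions, 5)  # a request arrived with position 5
    assert positions == {5: "state@5"}   # position 4 can no longer be replayed
    assert record_new(positions, 5, "state@6") == 6
    assert set(positions) == {5, 6}      # at most two positions are ever kept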
+        # Clear out any tokens for the connection that don't match the one
+        # from the request.
+
+        conn_key = self._get_connection_key(sync_config)
+        sync_statuses = self._connections.pop(conn_key, {})
+        if from_token is None:
+            return
+
+        sync_statuses = {
+            connection_token: room_statuses
+            for connection_token, room_statuses in sync_statuses.items()
+            if connection_token == from_token.connection_position
+        }
+        if sync_statuses:
+            self._connections[conn_key] = sync_statuses
+
+    @staticmethod
+    def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]:
+        """Return a unique identifier for this connection.
+
+        The first part is simply the user ID.
+
+        The second part is generally a combination of device ID and conn_id.
+        However, both of these are optional (e.g. puppet access tokens don't
+        have device IDs), so this handles those edge cases.
+
+        We use this over the raw `conn_id` to avoid clashes between different
+        clients that use the same `conn_id`. Imagine a user uses a web client
+        that uses `conn_id: main_sync_loop` and an Android client that also has
+        a `conn_id: main_sync_loop`.
+        """
+
+        user_id = sync_config.user.to_string()
+
+        # Only one sliding sync connection is allowed per given conn_id (empty
+        # or not).
+        conn_id = sync_config.conn_id or ""
+
+        if sync_config.requester.device_id:
+            return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}")
+
+        if sync_config.requester.access_token_id:
+            # If we don't have a device, then the access token ID should be a
+            # stable ID.
+            return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}")
+
+        # If we have neither then it's likely an AS or some weird token. Either
+        # way we can just fail here.
+        raise Exception("Cannot use sliding sync with access token type")
diff --git a/synapse/handlers/sliding_sync/types.py b/synapse/handlers/sliding_sync/types.py
new file mode 100644
index 0000000000..003419d40a
--- /dev/null
+++ b/synapse/handlers/sliding_sync/types.py
@@ -0,0 +1,506 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+import typing
+from collections import ChainMap
+from enum import Enum
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Dict,
+    Final,
+    Generic,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Set,
+    TypeVar,
+    cast,
+)
+
+import attr
+
+from synapse.api.constants import EventTypes
+from synapse.types import MultiWriterStreamToken, RoomStreamToken, StrCollection, UserID
+from synapse.types.handlers import SlidingSyncConfig
+
+if TYPE_CHECKING:
+    pass
+
+logger = logging.getLogger(__name__)
+
+
+class StateValues:
+    """
+    Understood values of the (type, state_key) tuple in `required_state`.
+    """
+
+    # Include all state events of the given type
+    WILDCARD: Final = "*"
+    # Lazy-load room membership events (include room membership events for any event
+    # `sender` in the timeline). We only give special meaning to this value when it's a
+    # `state_key`.
+    LAZY: Final = "$LAZY"
+    # Substitute with the requester's user ID. Typically used by clients to get
+    # the user's membership.
+    ME: Final = "$ME"
+
+
+# We can't freeze this class because we want to update it in place with the
+# de-duplicated data.
+
+
+# We can't freeze this class because we want to update it in place with the
+# de-duplicated data.
+@attr.s(slots=True, auto_attribs=True)
+class RoomSyncConfig:
+    """
+    Holds the config for what data we should fetch for a room in the sync response.
+
+    Attributes:
+        timeline_limit: The maximum number of events to return in the timeline.
+
+        required_state_map: Map from state event type to state_keys requested for the
+            room. The values are close to `StateKey` but actually use a syntax where you
+            can provide `*` wildcard and `$LAZY` for lazy-loading room members.
+    """
+
+    timeline_limit: int
+    required_state_map: Dict[str, Set[str]]
+
+    @classmethod
+    def from_room_config(
+        cls,
+        room_params: SlidingSyncConfig.CommonRoomParameters,
+    ) -> "RoomSyncConfig":
+        """
+        Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config.
+
+        Args:
+            room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription`
+        """
+        required_state_map: Dict[str, Set[str]] = {}
+        for (
+            state_type,
+            state_key,
+        ) in room_params.required_state:
+            # If we already have a wildcard for this specific `state_key`, we don't need
+            # to add it since the wildcard already covers it.
+            if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                continue
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD:
+                required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            # If we're getting a wildcard for the `state_type`, get rid of any other
+            # entries with the same `state_key`, since the wildcard will cover it already.
+            elif state_type == StateValues.WILDCARD:
+                # Get rid of any entries that match the `state_key`
+                #
+                # Make a copy so we don't run into an error: `dictionary changed size
+                # during iteration`, when we remove items
+                for (
+                    existing_state_type,
+                    existing_state_key_set,
+                ) in list(required_state_map.items()):
+                    # Make a copy so we don't run into an error: `Set changed size during
+                    # iteration`, when we filter out and remove items
+                    for existing_state_key in existing_state_key_set.copy():
+                        if existing_state_key == state_key:
+                            existing_state_key_set.remove(state_key)
+
+                    # If we've left the `set()` empty, remove it from the map
+                    if existing_state_key_set == set():
+                        required_state_map.pop(existing_state_type, None)
+
+            # If we're getting a wildcard `state_key`, get rid of any other state_keys
+            # for this `state_type` since the wildcard will cover it already.
+            if state_key == StateValues.WILDCARD:
+                required_state_map[state_type] = {state_key}
+            # Otherwise, just add it to the set
+            else:
+                if required_state_map.get(state_type) is None:
+                    required_state_map[state_type] = {state_key}
+                else:
+                    required_state_map[state_type].add(state_key)
+
+        return cls(
+            timeline_limit=room_params.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def deep_copy(self) -> "RoomSyncConfig":
+        required_state_map: Dict[str, Set[str]] = {
+            state_type: state_key_set.copy()
+            for state_type, state_key_set in self.required_state_map.items()
+        }
+
+        return RoomSyncConfig(
+            timeline_limit=self.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def combine_room_sync_config(
+        self, other_room_sync_config: "RoomSyncConfig"
+    ) -> None:
+        """
+        Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the
+        superset union of the two.
+        """
+        # Take the highest timeline limit
+        if self.timeline_limit < other_room_sync_config.timeline_limit:
+            self.timeline_limit = other_room_sync_config.timeline_limit
+
+        # Union the required state
+        for (
+            state_type,
+            state_key_set,
+        ) in other_room_sync_config.required_state_map.items():
+            # If we already have a wildcard for everything, we don't need to add
+            # anything else
+            if StateValues.WILDCARD in self.required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                break
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in self.required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if (
+                state_type == StateValues.WILDCARD
+                and StateValues.WILDCARD in state_key_set
+            ):
+                self.required_state_map = {state_type: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            for state_key in state_key_set:
+                # If we already have a wildcard for this specific `state_key`, we don't need
+                # to add it since the wildcard already covers it.
+                if state_key in self.required_state_map.get(
+                    StateValues.WILDCARD, set()
+                ):
+                    continue
+
+                # If we're getting a wildcard for the `state_type`, get rid of any other
+                # entries with the same `state_key`, since the wildcard will cover it already.
+                if state_type == StateValues.WILDCARD:
+                    # Get rid of any entries that match the `state_key`
+                    #
+                    # Make a copy so we don't run into an error: `dictionary changed size
+                    # during iteration`, when we remove items
+                    for existing_state_type, existing_state_key_set in list(
+                        self.required_state_map.items()
+                    ):
+                        # Make a copy so we don't run into an error: `Set changed size during
+                        # iteration`, when we filter out and remove items
+                        for existing_state_key in existing_state_key_set.copy():
+                            if existing_state_key == state_key:
+                                existing_state_key_set.remove(state_key)
+
+                        # If we've left the `set()` empty, remove it from the map
+                        if existing_state_key_set == set():
+                            self.required_state_map.pop(existing_state_type, None)
+
+                # If we're getting a wildcard `state_key`, get rid of any other state_keys
+                # for this `state_type` since the wildcard will cover it already.
+                if state_key == StateValues.WILDCARD:
+                    self.required_state_map[state_type] = {state_key}
+                    break
+                # Otherwise, just add it to the set
+                else:
+                    if self.required_state_map.get(state_type) is None:
+                        self.required_state_map[state_type] = {state_key}
+                    else:
+                        self.required_state_map[state_type].add(state_key)
+
+    def must_await_full_state(
+        self,
+        is_mine_id: Callable[[str], bool],
+    ) -> bool:
+        """
+        Check whether the `required_state` we're requesting is completely
+        satisfied even with partial state; if it is, we don't need to
+        `await_full_state` before we can return it.
+
+        Also see `StateFilter.must_await_full_state(...)` for comparison.
+
+        Partially-stated rooms should have all state events except for remote membership
+        events so if we require a remote membership event anywhere, then we need to
+        return `True` (requires full state).
+
+        Args:
+            is_mine_id: a callable which confirms if a given state_key matches an MXID
+               of a local user
+        """
+        wildcard_state_keys = self.required_state_map.get(StateValues.WILDCARD)
+        # Requesting *all* state in the room so we have to wait
+        if (
+            wildcard_state_keys is not None
+            and StateValues.WILDCARD in wildcard_state_keys
+        ):
+            return True
+
+        # If the wildcards don't refer to remote user IDs, then we don't need to wait
+        # for full state.
+        if wildcard_state_keys is not None:
+            for possible_user_id in wildcard_state_keys:
+                if not possible_user_id[0].startswith(UserID.SIGIL):
+                    # Not a user ID
+                    continue
+
+                localpart_hostname = possible_user_id.split(":", 1)
+                if len(localpart_hostname) < 2:
+                    # Not a user ID
+                    continue
+
+                if not is_mine_id(possible_user_id):
+                    return True
+
+        membership_state_keys = self.required_state_map.get(EventTypes.Member)
+        # We aren't requesting any membership events at all so the partial state will
+        # cover us.
+        if membership_state_keys is None:
+            return False
+
+        # If we're requesting entirely local users, the partial state will cover us.
+        for user_id in membership_state_keys:
+            if user_id == StateValues.ME:
+                continue
+            # We're lazy-loading membership so we can just return the state we have.
+            # Lazy-loading means we include membership for any event `sender` in the
+            # timeline but since we had to auth those timeline events, we will have the
+            # membership state for them (including from remote senders).
+            elif user_id == StateValues.LAZY:
+                continue
+            elif user_id == StateValues.WILDCARD:
+                return False
+            elif not is_mine_id(user_id):
+                return True
+
+        # Local users only so the partial state will cover us.
+        return False
+
+
+class HaveSentRoomFlag(Enum):
+    """Flag for whether we have sent the room down a sliding sync connection.
+
+    The valid state changes here are:
+        NEVER -> LIVE
+        LIVE -> PREVIOUSLY
+        PREVIOUSLY -> LIVE
+    """
+
+    # The room has never been sent down (or we have forgotten we have sent it
+    # down).
+    NEVER = "never"
+
+    # We have previously sent the room down, but there are updates that we
+    # haven't sent down.
+    PREVIOUSLY = "previously"
+
+    # We have sent the room down and the client has received all updates.
+    LIVE = "live"
+
+
+T = TypeVar("T")
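# --- Editorial example (not part of the patch): the transitions above,
# driven through the `MutableRoomStatusMap` defined further down, with a
# plain `int` standing in for the token type `T`:
#
#     statuses = MutableRoomStatusMap(statuses={})
#     statuses.have_sent_room("!a:x").status       # NEVER (the default)
#     statuses.record_sent_rooms(["!a:x"])         # NEVER -> LIVE
#     statuses.record_unsent_rooms(["!a:x"], 5)    # LIVE -> PREVIOUSLY(5)
#     statuses.have_sent_room("!a:x")              # == HaveSentRoom.previously(5)
#     statuses.record_sent_rooms(["!a:x"])         # PREVIOUSLY -> LIVE again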
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class HaveSentRoom(Generic[T]):
+    """Whether we have sent the room data down a sliding sync connection.
+
+    We are generic over the type of token used, e.g. `RoomStreamToken` or
+    `MultiWriterStreamToken`.
+
+    Attributes:
+        status: Whether we have sent the room down.
+        last_token: If the flag is `PREVIOUSLY` then this is non-null and
+            contains the stream token of the last updates we sent down
+            the room, i.e. we still need to send everything since then to the
+            client.
+    """
+
+    status: HaveSentRoomFlag
+    last_token: Optional[T]
+
+    @staticmethod
+    def live() -> "HaveSentRoom[T]":
+        return HaveSentRoom(HaveSentRoomFlag.LIVE, None)
+
+    @staticmethod
+    def previously(last_token: T) -> "HaveSentRoom[T]":
+        """Constructor for `PREVIOUSLY` flag."""
+        return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token)
+
+    @staticmethod
+    def never() -> "HaveSentRoom[T]":
+        return HaveSentRoom(HaveSentRoomFlag.NEVER, None)
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class RoomStatusMap(Generic[T]):
+    """For a given stream, e.g. events, records what we have or have not sent
+    down for that stream in a given room."""
+
+    # `room_id` -> `HaveSentRoom`
+    _statuses: Mapping[str, HaveSentRoom[T]] = attr.Factory(dict)
+
+    def have_sent_room(self, room_id: str) -> HaveSentRoom[T]:
+        """Return whether we have previously sent the room down"""
+        return self._statuses.get(room_id, HaveSentRoom.never())
+
+    def get_mutable(self) -> "MutableRoomStatusMap[T]":
+        """Get a mutable copy of this state."""
+        return MutableRoomStatusMap(
+            statuses=self._statuses,
+        )
+
+    def copy(self) -> "RoomStatusMap[T]":
+        """Make a copy of the class. Useful for converting from a mutable to
+        immutable version."""
+
+        return RoomStatusMap(statuses=dict(self._statuses))
+
+
+class MutableRoomStatusMap(RoomStatusMap[T]):
+    """A mutable version of `RoomStatusMap`"""
+
+    # We use a ChainMap here so that we can easily track what has been updated
+    # and what hasn't. Note that when we persist the per connection state this
+    # will get flattened to a normal dict (via calling `.copy()`)
+    _statuses: typing.ChainMap[str, HaveSentRoom[T]]
+
+    def __init__(
+        self,
+        statuses: Mapping[str, HaveSentRoom[T]],
+    ) -> None:
+        # ChainMap requires a mutable mapping, but we're not actually going to
+        # mutate it.
+        statuses = cast(MutableMapping, statuses)
+
+        super().__init__(
+            statuses=ChainMap({}, statuses),
+        )
+
+    def get_updates(self) -> Mapping[str, HaveSentRoom[T]]:
+        """Return only the changes that were made"""
+        return self._statuses.maps[0]
+
+    def record_sent_rooms(self, room_ids: StrCollection) -> None:
+        """Record that we have sent these rooms in the response"""
+        for room_id in room_ids:
+            current_status = self._statuses.get(room_id, HaveSentRoom.never())
+            if current_status.status == HaveSentRoomFlag.LIVE:
+                continue
+
+            self._statuses[room_id] = HaveSentRoom.live()
+
+    def record_unsent_rooms(self, room_ids: StrCollection, from_token: T) -> None:
+        """Record that we have not sent these rooms in the response, but there
+        have been updates.
+        """
+        # Whether we add/update the entries for unsent rooms depends on the
+        # existing entry:
+        #   - LIVE: We have previously sent down everything up to
+        #     `last_room_token`, so we update the entry to be `PREVIOUSLY` with
+        #     `last_room_token`.
+        #   - PREVIOUSLY: We have previously sent down everything up to *a*
+        #     given token, so we don't need to update the entry.
+        #   - NEVER: We have never previously sent down the room, and we haven't
+        #     sent anything down this time either, so we leave it as NEVER.
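# --- Editorial example (not part of the patch): with from_token=7 and
# existing statuses {A: LIVE, B: PREVIOUSLY(3), C: NEVER}, only A changes:
# A becomes PREVIOUSLY(7), B keeps its older token 3, and C stays NEVER.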
+ + for room_id in room_ids: + current_status = self._statuses.get(room_id, HaveSentRoom.never()) + if current_status.status != HaveSentRoomFlag.LIVE: + continue + + self._statuses[room_id] = HaveSentRoom.previously(from_token) + + +@attr.s(auto_attribs=True) +class PerConnectionState: + """The per-connection state. A snapshot of what we've sent down the + connection before. + + Currently, we track whether we've sent down various aspects of a given room + before. + + We use the `rooms` field to store the position in the events stream for each + room that we've previously sent to the client before. On the next request + that includes the room, we can then send only what's changed since that + recorded position. + + Same goes for the `receipts` field so we only need to send the new receipts + since the last time you made a sync request. + + Attributes: + rooms: The status of each room for the events stream. + receipts: The status of each room for the receipts stream. + room_configs: Map from room_id to the `RoomSyncConfig` of all + rooms that we have previously sent down. + """ + + rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) + receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) + + room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict) + + def get_mutable(self) -> "MutablePerConnectionState": + """Get a mutable copy of this state.""" + room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) + + return MutablePerConnectionState( + rooms=self.rooms.get_mutable(), + receipts=self.receipts.get_mutable(), + room_configs=ChainMap({}, room_configs), + ) + + def copy(self) -> "PerConnectionState": + return PerConnectionState( + rooms=self.rooms.copy(), + receipts=self.receipts.copy(), + room_configs=dict(self.room_configs), + ) + + +@attr.s(auto_attribs=True) +class MutablePerConnectionState(PerConnectionState): + """A mutable version of `PerConnectionState`""" + + rooms: MutableRoomStatusMap[RoomStreamToken] + receipts: MutableRoomStatusMap[MultiWriterStreamToken] + + room_configs: typing.ChainMap[str, RoomSyncConfig] + + def has_updates(self) -> bool: + return ( + bool(self.rooms.get_updates()) + or bool(self.receipts.get_updates()) + or bool(self.get_room_config_updates()) + ) + + def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]: + """Get updates to the room sync config""" + return self.room_configs.maps[0] From ad0ee53993a412c88ba0ae4b314992d961771882 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:52:16 +0100 Subject: [PATCH 144/332] Bump serde from 1.0.206 to 1.0.208 (#17581) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d74309a756..3e12dfc7f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.206" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.206" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" +checksum = 
"24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", From ad2cd9aefd0d5bec6cecc3c9f29505993256eef0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:52:26 +0100 Subject: [PATCH 145/332] Bump serde_json from 1.0.124 to 1.0.125 (#17582) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e12dfc7f5..84d2768133 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ "itoa", "memchr", From 87d13fd143f2801ac4a3166e4505f6312d2d0e63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:52:34 +0100 Subject: [PATCH 146/332] Bump types-jsonschema from 4.23.0.20240712 to 4.23.0.20240813 (#17583) --- poetry.lock | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d476973ead..798bcf207a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2099,6 +2099,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2106,8 +2107,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2124,6 +2133,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2131,6 +2141,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ 
-2802,13 +2813,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.23.0.20240712" +version = "4.23.0.20240813" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"}, - {file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = "sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"}, + {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"}, + {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"}, ] [package.dependencies] From 74b75cfd54c95ed70321436ecea959115fdeb5ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:52:53 +0100 Subject: [PATCH 147/332] Bump cryptography from 42.0.8 to 43.0.0 (#17584) --- poetry.lock | 63 ++++++++++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/poetry.lock b/poetry.lock index 798bcf207a..b2b66b06d3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -403,43 +403,38 @@ files = [ [[package]] name = "cryptography" -version = "42.0.8" +version = "43.0.0" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, - {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, - {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, - {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, - {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = 
"sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, - {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, - {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, - {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, + {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"}, + {file = 
"cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"}, + {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"}, + {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"}, + {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"}, + {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"}, + {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"}, + {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"}, + {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"}, + {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"}, + {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"}, + {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"}, + {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"}, + {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"}, + {file = "cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"}, + {file = "cryptography-43.0.0.tar.gz", 
hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"}, ] [package.dependencies] @@ -452,7 +447,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From cb9fa062b774510582130a7f4fb713f8d18730f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:53:06 +0100 Subject: [PATCH 148/332] Bump sentry-sdk from 2.12.0 to 2.13.0 (#17585) --- poetry.lock | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b2b66b06d3..7c5d2b7b2c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2409,13 +2409,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.12.0" +version = "2.13.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.12.0-py2.py3-none-any.whl", hash = "sha256:7a8d5163d2ba5c5f4464628c6b68f85e86972f7c636acc78aed45c61b98b7a5e"}, - {file = "sentry_sdk-2.12.0.tar.gz", hash = "sha256:8763840497b817d44c49b3fe3f5f7388d083f2337ffedf008b2cdb63b5c86dc6"}, + {file = "sentry_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6"}, + {file = "sentry_sdk-2.13.0.tar.gz", hash = "sha256:8d4a576f7a98eb2fdb40e13106e41f330e5c79d72a68be1316e7852cf4995260"}, ] [package.dependencies] @@ -2442,6 +2442,7 @@ httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] huggingface-hub = ["huggingface-hub (>=0.22)"] langchain = ["langchain (>=0.0.210)"] +litestar = ["litestar (>=2.0.0)"] loguru = ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] From f1a1c7fc537514ce5919d04b492ad3b2dba74646 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:53:14 +0100 Subject: [PATCH 149/332] Bump types-setuptools from 71.1.0.20240726 to 71.1.0.20240818 (#17586) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7c5d2b7b2c..c467ca8dc2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2907,13 +2907,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "71.1.0.20240726" +version = "71.1.0.20240818" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-71.1.0.20240726.tar.gz", hash = "sha256:85ba28e9461bb1be86ebba4db0f1c2408f2b11115b1966334ea9dc464e29303e"}, - {file = "types_setuptools-71.1.0.20240726-py3-none-any.whl", hash = "sha256:a7775376f36e0ff09bcad236bf265777590a66b11623e48c20bfc30f1444ea36"}, + {file = "types-setuptools-71.1.0.20240818.tar.gz", hash = "sha256:f62eaffaa39774462c65fbb49368c4dc1d91a90a28371cb14e1af090ff0e41e3"}, + {file = "types_setuptools-71.1.0.20240818-py3-none-any.whl", hash = "sha256:c4f95302f88369ac0ac46c67ddbfc70c6c4dbbb184d9fed356244217a2934025"}, ] [[package]] From b4d95409fbecbb10a6c3c3adb316ff8f49fbdd09 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Aug 2024 11:47:28 +0100 Subject: [PATCH 150/332] Fix @tag_args for non-methods (#17604) The 
decorator assumed we were always wrapping methods.
---
 changelog.d/17604.misc         |  1 +
 synapse/logging/opentracing.py | 14 +++++++-------
 2 files changed, 8 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/17604.misc

diff --git a/changelog.d/17604.misc b/changelog.d/17604.misc
new file mode 100644
index 0000000000..96cb213bbd
--- /dev/null
+++ b/changelog.d/17604.misc
@@ -0,0 +1 @@
+Add support to `@tag_args` for standalone functions.
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 7a3c805cc5..e32b3f6781 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -1032,13 +1032,13 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
     def _wrapping_logic(
         _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
     ) -> Generator[None, None, None]:
-        # We use `[1:]` to skip the `self` object reference and `start=1` to
-        # make the index line up with `argspec.args`.
-        #
-        # FIXME: We could update this to handle any type of function by ignoring the
-        # first argument only if it's named `self` or `cls`. This isn't fool-proof
-        # but handles the idiomatic cases.
-        for i, arg in enumerate(args[1:], start=1):
+        for i, arg in enumerate(args, start=0):
+            if argspec.args[i] in ("self", "cls"):
+                # Ignore `self` and `cls` values. Ideally we'd properly detect
+                # if we were wrapping a method, but that is really non-trivial
+                # and this is good enough.
+                continue
+
             set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
         set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
         set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
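The shape of the fix is easier to see outside of the tracing machinery. Below is a small standalone sketch (illustrative only, not Synapse code) of the rule the new loop applies: walk every positional argument and skip any parameter literally named `self` or `cls`, run against both a method and a plain function. The `tag_positional_args` helper, the `Store` class, and the argument values are all invented for the example.

import inspect

def tag_positional_args(func, args):
    # Mirror the fixed loop above: iterate over *all* positional args and
    # skip any that bind to a parameter named `self` or `cls`.
    argspec = inspect.getfullargspec(func)
    tags = {}
    for i, arg in enumerate(args):
        if argspec.args[i] in ("self", "cls"):
            continue
        tags[argspec.args[i]] = str(arg)
    return tags

class Store:
    def get_event(self, event_id, check_redacted=True):
        pass

def get_event(event_id, check_redacted=True):
    pass

# Method: `self` is skipped, so only the real arguments are tagged.
print(tag_positional_args(Store.get_event, (Store(), "$event", True)))
# Plain function: the old `args[1:]` logic would have dropped `event_id`;
# the new logic tags it too. Both calls print:
#   {'event_id': '$event', 'check_redacted': 'True'}
print(tag_positional_args(get_event, ("$event", True)))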
From defd4aca67ecd93c6513bf710d3a6fa63c2b2425 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 27 Aug 2024 12:03:56 +0100
Subject: [PATCH 151/332] Speed up fetching latest stream positions via cache
 (#17606)

The idea is to engineer it so that the vast majority of the rooms can
stay in the cache, so we can avoid re-querying them.
---
 changelog.d/17606.misc                   |   1 +
 synapse/storage/databases/main/cache.py  |   6 +
 synapse/storage/databases/main/stream.py | 137 ++++++++++++-----------
 3 files changed, 77 insertions(+), 67 deletions(-)
 create mode 100644 changelog.d/17606.misc

diff --git a/changelog.d/17606.misc b/changelog.d/17606.misc
new file mode 100644
index 0000000000..47634b1305
--- /dev/null
+++ b/changelog.d/17606.misc
@@ -0,0 +1 @@
+Speed up incremental syncs in sliding sync by adding some more caching.
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 63624f3e8f..246d2acc2f 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -313,6 +313,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             "get_unread_event_push_actions_by_room_for_user", (room_id,)
         )

+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
+
         # The `_get_membership_from_event_id` is immutable, except for the
         # case where we look up an event *before* persisting it.
         self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))
@@ -404,6 +406,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         )
         self._attempt_to_invalidate_cache("get_relations_for_event", (room_id,))

+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
+
         self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
         self._attempt_to_invalidate_cache("get_applicable_edit", None)
         self._attempt_to_invalidate_cache("get_thread_id", None)
@@ -476,6 +480,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         self._attempt_to_invalidate_cache("get_room_type", (room_id,))
         self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))

+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
+
         # And delete state caches.

         self._invalidate_state_caches_all(room_id)
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 4989c960a6..e33a8cfe97 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -50,6 +50,7 @@ from typing import (
     Dict,
     Iterable,
     List,
+    Mapping,
     Optional,
     Protocol,
     Set,
@@ -80,7 +81,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.cancellation import cancellable
 from synapse.util.iterutils import batch_iter
@@ -1381,8 +1382,52 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         rooms
         """

+        # First we just get the latest positions for the rooms, as the vast
+        # majority of them will be before the given end token anyway. By doing
+        # this we can cache most rooms.
+        uncapped_results = await self._bulk_get_max_event_pos(room_ids)
+
+        # Check that the stream positions for the rooms are from before the
+        # minimum position of the token. If not then we need to fetch more
+        # rows.
+        results: Dict[str, int] = {}
+        recheck_rooms: Set[str] = set()
         min_token = end_token.stream
-        max_token = end_token.get_max_stream_pos()
+        for room_id, stream in uncapped_results.items():
+            if stream <= min_token:
+                results[room_id] = stream
+            else:
+                recheck_rooms.add(room_id)
+
+        if not recheck_rooms:
+            return results
+
+        # There shouldn't be many rooms that we need to recheck, so we do them
+        # one-by-one.
+        for room_id in recheck_rooms:
+            result = await self.get_last_event_pos_in_room_before_stream_ordering(
+                room_id, end_token
+            )
+            if result is not None:
+                results[room_id] = result[1].stream
+
+        return results
+
+    @cached()
+    async def _get_max_event_pos(self, room_id: str) -> int:
+        raise NotImplementedError()
+
+    @cachedList(cached_method_name="_get_max_event_pos", list_name="room_ids")
+    async def _bulk_get_max_event_pos(
+        self, room_ids: StrCollection
+    ) -> Mapping[str, int]:
+        """Fetch the max position of a persisted event in the room."""
+
+        # We need to be careful not to return positions ahead of the current
+        # positions, so we get the current token now and cap our queries to it.
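# --- Editorial example (not part of the patch): with now_token.stream = 100
# and max_pos = 105 (the gap comes from multiple event-stream writers), a
# room whose capped query returns 97 is safely at-or-before the current
# token, while one returning 103 may have been written after we took the
# token and so is re-fetched one room at a time below.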
+        now_token = self.get_room_max_token()
+        max_pos = now_token.get_max_stream_pos()

         results: Dict[str, int] = {}

         # First, we check for the rooms in the stream change cache to see if we
@@ -1390,31 +1435,32 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         missing_room_ids: Set[str] = set()
         for room_id in room_ids:
             stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id)
-            if stream_pos and stream_pos <= min_token:
+            if stream_pos is not None:
                 results[room_id] = stream_pos
             else:
                 missing_room_ids.add(room_id)

+        if not missing_room_ids:
+            return results
+
         # Next, we query the stream position from the DB. At first we fetch all
         # positions less than the *max* stream pos in the token, then filter
         # them down. We do this as a) this is a cheaper query, and b) the vast
         # majority of rooms will have a latest token from before the min stream
         # pos.

-        def bulk_get_last_event_pos_txn(
-            txn: LoggingTransaction, batch_room_ids: StrCollection
+        def bulk_get_max_event_pos_txn(
+            txn: LoggingTransaction, batched_room_ids: StrCollection
         ) -> Dict[str, int]:
-            # This query fetches the latest stream position in the rooms before
-            # the given max position.
             clause, args = make_in_list_sql_clause(
-                self.database_engine, "room_id", batch_room_ids
+                self.database_engine, "room_id", batched_room_ids
             )
             sql = f"""
                 SELECT room_id, (
                     SELECT stream_ordering FROM events AS e
                     LEFT JOIN rejections USING (event_id)
                     WHERE e.room_id = r.room_id
-                        AND stream_ordering <= ?
+                        AND e.stream_ordering <= ?
                         AND NOT outlier
                         AND rejection_reason IS NULL
                     ORDER BY stream_ordering DESC
@@ -1423,72 +1469,29 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 FROM rooms AS r
                 WHERE {clause}
             """
-            txn.execute(sql, [max_token] + args)
+            txn.execute(sql, [max_pos] + args)

             return {row[0]: row[1] for row in txn}

         recheck_rooms: Set[str] = set()
-        for batched in batch_iter(missing_room_ids, 1000):
-            result = await self.db_pool.runInteraction(
-                "bulk_get_last_event_pos_in_room_before_stream_ordering",
-                bulk_get_last_event_pos_txn,
-                batched,
+        for batched in batch_iter(room_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "_bulk_get_max_event_pos", bulk_get_max_event_pos_txn, batched
             )
-
-            # Check that the stream position for the rooms are from before the
-            # minimum position of the token. If not then we need to fetch more
-            # rows.
-            for room_id, stream in result.items():
-                if stream <= min_token:
-                    results[room_id] = stream
+            for room_id, stream_ordering in batch_results.items():
+                if stream_ordering <= now_token.stream:
+                    results[room_id] = stream_ordering
                 else:
                     recheck_rooms.add(room_id)

-        if not recheck_rooms:
-            return results
-
-        # For the remaining rooms we need to fetch all rows between the min and
-        # max stream positions in the end token, and filter out the rows that
-        # are after the end token.
-        #
-        # This query should be fast as the range between the min and max should
-        # be small.
-
-        def bulk_get_last_event_pos_recheck_txn(
-            txn: LoggingTransaction, batch_room_ids: StrCollection
-        ) -> Dict[str, int]:
-            clause, args = make_in_list_sql_clause(
-                self.database_engine, "room_id", batch_room_ids
+        # We now need to handle rooms where the above query returned a stream
+        # position that was potentially too new.
This should happen very rarely + # so we just query the rooms one-by-one + for room_id in recheck_rooms: + result = await self.get_last_event_pos_in_room_before_stream_ordering( + room_id, now_token ) - sql = f""" - SELECT room_id, instance_name, stream_ordering - FROM events - WHERE ? < stream_ordering AND stream_ordering <= ? - AND NOT outlier - AND rejection_reason IS NULL - AND {clause} - ORDER BY stream_ordering ASC - """ - txn.execute(sql, [min_token, max_token] + args) - - # We take the max stream ordering that is less than the token. Since - # we ordered by stream ordering we just need to iterate through and - # take the last matching stream ordering. - txn_results: Dict[str, int] = {} - for row in txn: - room_id = row[0] - event_pos = PersistedEventPosition(row[1], row[2]) - if not event_pos.persisted_after(end_token): - txn_results[room_id] = event_pos.stream - - return txn_results - - for batched in batch_iter(recheck_rooms, 1000): - recheck_result = await self.db_pool.runInteraction( - "bulk_get_last_event_pos_in_room_before_stream_ordering_recheck", - bulk_get_last_event_pos_recheck_txn, - batched, - ) - results.update(recheck_result) + if result is not None: + results[room_id] = result[1].stream return results From a2b2f6d09b1dfceabd0c5191e8198371bcf7c236 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:55:03 +0100 Subject: [PATCH 152/332] Bump serde_json from 1.0.125 to 1.0.127 (#17614) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84d2768133..d3ac1dae7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", "memchr", From 940b93240586787838fd6588f233f0caa88694ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:55:27 +0100 Subject: [PATCH 153/332] Bump pygithub from 2.3.0 to 2.4.0 (#17612) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index c467ca8dc2..c499041c85 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1877,13 +1877,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" -version = "2.3.0" +version = "2.4.0" description = "Use the full Github API v3" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "PyGithub-2.3.0-py3-none-any.whl", hash = "sha256:65b499728be3ce7b0cd2cd760da3b32f0f4d7bc55e5e0677617f90f6564e793e"}, - {file = "PyGithub-2.3.0.tar.gz", hash = "sha256:0148d7347a1cdeed99af905077010aef81a4dad988b0ba51d4108bf66b443f7e"}, + {file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"}, + {file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"}, ] [package.dependencies] From 48742da536ce6b1fd0713fff6b02387d9be54d1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:55:38 +0100 Subject: [PATCH 154/332] Bump attrs from 23.2.0 to 24.2.0 (#17609) --- poetry.lock | 18 
+++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/poetry.lock b/poetry.lock index c499041c85..161fdf0180 100644 --- a/poetry.lock +++ b/poetry.lock @@ -16,22 +16,22 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "authlib" From cca77af68ff681b7c06d22535e73174dea5971f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:55:47 +0100 Subject: [PATCH 155/332] Bump phonenumbers from 8.13.43 to 8.13.44 (#17610) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 161fdf0180..e2cbb6450a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1507,13 +1507,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.43" +version = "8.13.44" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.43-py2.py3-none-any.whl", hash = "sha256:339e521403fe4dd9c664dbbeb2fe434f9ea5c81e54c0fdfadbaeb53b26a76c27"}, - {file = "phonenumbers-8.13.43.tar.gz", hash = "sha256:35b904e4a79226eee027fbb467a9aa6f1ab9ffc3c09c91bf14b885c154936726"}, + {file = "phonenumbers-8.13.44-py2.py3-none-any.whl", hash = "sha256:52cd02865dab1428ca9e89d442629b61d407c7dc687cfb80a3e8d068a584513c"}, + {file = "phonenumbers-8.13.44.tar.gz", hash = "sha256:2175021e84ee4e41b43c890f2d0af51f18c6ca9ad525886d6d6e4ea882e46fac"}, ] [[package]] From d9cc0faf4b876d3b8d63017c11108948fc698f03 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:55:56 +0100 Subject: [PATCH 156/332] Bump pyyaml from 6.0.1 to 6.0.2 (#17611) --- poetry.lock | 108 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/poetry.lock b/poetry.lock index e2cbb6450a..0fffa8e9ba 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2084,62 +2084,64 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = 
"sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] From 8da16e55fed6c880a61140e7375eac2e31494f81 Mon Sep 17 00:00:00 2001 From: eyJhb Date: Tue, 27 Aug 2024 19:51:43 +0200 Subject: [PATCH 157/332] hash_password accepts stdin now (#17608) `hash_password` now actually accepts password from stdin. The `getpass` reads from TTY, and does NOT accept stdin in any way. The manpage has been updated to reflect that. 
--- changelog.d/17608.feature | 1 + debian/hash_password.1 | 27 +++-- debian/hash_password.1.html | 182 ++++++++++++++++++++++++++++++ debian/hash_password.ronn | 13 ++- synapse/_scripts/hash_password.py | 4 +- 5 files changed, 216 insertions(+), 11 deletions(-) create mode 100644 changelog.d/17608.feature create mode 100644 debian/hash_password.1.html diff --git a/changelog.d/17608.feature b/changelog.d/17608.feature new file mode 100644 index 0000000000..adf9ac5533 --- /dev/null +++ b/changelog.d/17608.feature @@ -0,0 +1 @@ +Make `hash_password` accept password input from stdin. \ No newline at end of file diff --git a/debian/hash_password.1 b/debian/hash_password.1 index 39fa3ffcbf..af55e09c45 100644 --- a/debian/hash_password.1 +++ b/debian/hash_password.1 @@ -1,10 +1,13 @@ -.\" generated with Ronn-NG/v0.8.0 -.\" http://github.com/apjanke/ronn-ng/tree/0.8.0 -.TH "HASH_PASSWORD" "1" "July 2021" "" "" +.\" generated with Ronn-NG/v0.10.1 +.\" http://github.com/apjanke/ronn-ng/tree/0.10.1 +.TH "HASH_PASSWORD" "1" "August 2024" "" .SH "NAME" \fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset .SH "SYNOPSIS" -\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR] +.TS +allbox; +\fBhash_password\fR [\fB\-p\fR \fB\-\-password\fR [password]] [\fB\-c\fR \fB\-\-config\fR \fIfile\fR] +.TE .SH "DESCRIPTION" \fBhash_password\fR calculates the hash of a supplied password using bcrypt\. .P @@ -20,7 +23,7 @@ bcrypt_rounds: 17 password_config: pepper: "random hashing pepper" .SH "OPTIONS" .TP \fB\-p\fR, \fB\-\-password\fR -Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\. +Read the password form the command line if [password] is supplied, or from \fBSTDIN\fR\. If not, prompt the user and read the password from the tty prompt\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\. .TP \fB\-c\fR, \fB\-\-config\fR Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\. @@ -33,7 +36,17 @@ $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe .fi .IP "" 0 .P -Hash from the STDIN: +Hash from the stdin: +.IP "" 4 +.nf +$ cat password_file | hash_password +Password: +Confirm password: +$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG +.fi +.IP "" 0 +.P +Hash from the prompt: .IP "" 4 .nf $ hash_password @@ -53,6 +66,6 @@ $2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O .fi .IP "" 0 .SH "COPYRIGHT" -This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\. +This man page was written by Rahul De «rahulde@swecha\.net» for Debian GNU/Linux distribution\. .SH "SEE ALSO" synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1) diff --git a/debian/hash_password.1.html b/debian/hash_password.1.html new file mode 100644 index 0000000000..7a62787780 --- /dev/null +++ b/debian/hash_password.1.html @@ -0,0 +1,182 @@ + + + + + + hash_password(1) - Calculate the hash of a new password, so that passwords can be reset + + + + +
+ [Generated HTML body of the hash_password(1) man page omitted: NAME, SYNOPSIS, DESCRIPTION, FILES, OPTIONS, EXAMPLES, COPYRIGHT, SEE ALSO — the same content as debian/hash_password.ronn below.]
+ + diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn index 5d0df53802..b68d4a210e 100644 --- a/debian/hash_password.ronn +++ b/debian/hash_password.ronn @@ -29,8 +29,8 @@ A sample YAML file accepted by `hash_password` is described below: ## OPTIONS * `-p`, `--password`: - Read the password form the command line if [password] is supplied. - If not, prompt the user and read the password form the `STDIN`. + Read the password form the command line if [password] is supplied, or from `STDIN`. + If not, prompt the user and read the password from the tty prompt. It is not recommended to type the password on the command line directly. Use the STDIN instead. @@ -45,7 +45,14 @@ Hash from the command line: $ hash_password -p "p@ssw0rd" $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe -Hash from the STDIN: +Hash from the stdin: + + $ cat password_file | hash_password + Password: + Confirm password: + $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG + +Hash from the prompt: $ hash_password Password: diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py index 3bed367be2..2b7d3585cb 100755 --- a/synapse/_scripts/hash_password.py +++ b/synapse/_scripts/hash_password.py @@ -56,7 +56,9 @@ def main() -> None: password_pepper = password_config.get("pepper", password_pepper) password = args.password - if not password: + if not password and not sys.stdin.isatty(): + password = sys.stdin.readline().strip() + elif not password: password = prompt_for_pass() # On Python 2, make sure we decode it to Unicode before we normalise it From f4032d3e718c806a193fb5167d9958052cd47add Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:09:26 +0100 Subject: [PATCH 158/332] Bump serde from 1.0.208 to 1.0.209 (#17613) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3ac1dae7d..18936ab843 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", From e563e4bdf3e7d55d767e24205e594f520732986d Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 28 Aug 2024 03:29:12 -0700 Subject: [PATCH 159/332] Fix content length on federation `/thumbnail` responses (#17532) --- changelog.d/17532.bugfix | 1 + synapse/media/_base.py | 22 ++++++++++----------- synapse/media/media_repository.py | 6 +++--- synapse/media/thumbnailer.py | 32 +++++++++++++++++++++++++------ 4 files changed, 41 insertions(+), 20 deletions(-) create mode 100644 changelog.d/17532.bugfix diff --git a/changelog.d/17532.bugfix b/changelog.d/17532.bugfix new file mode 100644 index 0000000000..5b05f0f9ba --- /dev/null +++ b/changelog.d/17532.bugfix @@ -0,0 +1 @@ +Fix content-length on federation /thumbnail responses. 
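The bug here was that the thumbnail code paths handed the original upload's `media_info` to `respond_with_multipart_responder`, so the multipart body's Content-Length was computed from the full-size media rather than from the thumbnail actually being served; the patch threads the thumbnail's own type and length through instead. For intuition, a multipart Content-Length must be summed from the exact sizes of the parts on the wire. A minimal sketch of that accounting follows — the boundary framing and helper names are illustrative, not Synapse's actual `MultipartFileConsumer` logic:

    from typing import List, Tuple

    CRLF = b"\r\n"

    def multipart_content_length(
        boundary: bytes, parts: List[Tuple[bytes, int]]
    ) -> int:
        # Each element of `parts` is (encoded_part_headers, body_length);
        # the header blob is assumed to already end with the blank line
        # (CRLF CRLF) separating a part's headers from its body.
        total = 0
        for headers, body_length in parts:
            total += len(b"--" + boundary + CRLF)      # opening delimiter line
            total += len(headers)                      # part headers + blank line
            total += body_length + len(CRLF)           # part body + line break
        total += len(b"--" + boundary + b"--" + CRLF)  # closing delimiter line
        return total

Feeding a computation like this the thumbnail's own length, rather than the original file's, is what makes the advertised Content-Length match the bytes actually sent.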
diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 9341d4859e..7877df62fa 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -60,8 +60,6 @@ from synapse.util.stringutils import is_ascii if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.databases.main.media_repository import LocalMedia - logger = logging.getLogger(__name__) @@ -290,7 +288,9 @@ async def respond_with_multipart_responder( clock: Clock, request: SynapseRequest, responder: "Optional[Responder]", - media_info: "LocalMedia", + media_type: str, + media_length: Optional[int], + upload_name: Optional[str], ) -> None: """ Responds to requests originating from the federation media `/download` endpoint by @@ -314,7 +314,7 @@ async def respond_with_multipart_responder( ) return - if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES: + if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES: disposition = "inline" else: disposition = "attachment" @@ -322,16 +322,16 @@ async def respond_with_multipart_responder( def _quote(x: str) -> str: return urllib.parse.quote(x.encode("utf-8")) - if media_info.upload_name: - if _can_encode_filename_as_token(media_info.upload_name): + if upload_name: + if _can_encode_filename_as_token(upload_name): disposition = "%s; filename=%s" % ( disposition, - media_info.upload_name, + upload_name, ) else: disposition = "%s; filename*=utf-8''%s" % ( disposition, - _quote(media_info.upload_name), + _quote(upload_name), ) from synapse.media.media_storage import MultipartFileConsumer @@ -341,14 +341,14 @@ async def respond_with_multipart_responder( multipart_consumer = MultipartFileConsumer( clock, request, - media_info.media_type, + media_type, {}, disposition, - media_info.media_length, + media_length, ) logger.debug("Responding to media request with responder %s", responder) - if media_info.media_length is not None: + if media_length is not None: content_length = multipart_consumer.content_length() assert content_length is not None request.setHeader(b"Content-Length", b"%d" % (content_length,)) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 8bc92305fe..0b74209232 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -471,7 +471,7 @@ class MediaRepository: responder = await self.media_storage.fetch_media(file_info) if federation: await respond_with_multipart_responder( - self.clock, request, responder, media_info + self.clock, request, responder, media_type, media_length, upload_name ) else: await respond_with_responder( @@ -1008,7 +1008,7 @@ class MediaRepository: t_method: str, t_type: str, url_cache: bool, - ) -> Optional[str]: + ) -> Optional[Tuple[str, FileInfo]]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) @@ -1070,7 +1070,7 @@ class MediaRepository: t_len, ) - return output_path + return output_path, file_info # Could not generate thumbnail. 
return None diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 042851021c..ee1118a53a 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -348,7 +348,12 @@ class ThumbnailProvider: if responder: if for_federation: await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + info.type, + info.length, + None, ) return else: @@ -360,7 +365,7 @@ class ThumbnailProvider: logger.debug("We don't have a thumbnail of that size. Generating") # Okay, so we generate one. - file_path = await self.media_repo.generate_local_exact_thumbnail( + thumbnail_result = await self.media_repo.generate_local_exact_thumbnail( media_id, desired_width, desired_height, @@ -369,13 +374,18 @@ class ThumbnailProvider: url_cache=bool(media_info.url_cache), ) - if file_path: + if thumbnail_result: + file_path, file_info = thumbnail_result + assert file_info.thumbnail is not None + if for_federation: await respond_with_multipart_responder( self.hs.get_clock(), request, FileResponder(self.hs, open(file_path, "rb")), - media_info, + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) else: await respond_with_file(self.hs, request, desired_type, file_path) @@ -580,7 +590,12 @@ class ThumbnailProvider: if for_federation: assert media_info is not None await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) return else: @@ -634,7 +649,12 @@ class ThumbnailProvider: if for_federation: assert media_info is not None await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) else: await respond_with_responder( From e75a23a63d5fe8edb0145bdd55b98d9e6a29b2ab Mon Sep 17 00:00:00 2001 From: Krishan <33421343+kfiven@users.noreply.github.com> Date: Thu, 29 Aug 2024 00:45:49 +1000 Subject: [PATCH 160/332] Fix hierarchy returning 403 when room is accessible through federation (#17194) --- changelog.d/17194.bugfix | 1 + synapse/handlers/room_summary.py | 25 +++++++++++++-- tests/handlers/test_room_summary.py | 48 +++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17194.bugfix diff --git a/changelog.d/17194.bugfix b/changelog.d/17194.bugfix new file mode 100644 index 0000000000..29ac56ceac --- /dev/null +++ b/changelog.d/17194.bugfix @@ -0,0 +1 @@ +Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 720459f1e7..64f5bea014 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -183,8 +183,13 @@ class RoomSummaryHandler: ) -> JsonDict: """See docstring for SpaceSummaryHandler.get_room_hierarchy.""" - # First of all, check that the room is accessible. - if not await self._is_local_room_accessible(requested_room_id, requester): + # If the room is available locally, quickly check that the user can access it. 
+ local_room = await self._store.is_host_joined( + requested_room_id, self._server_name + ) + if local_room and not await self._is_local_room_accessible( + requested_room_id, requester + ): raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" @@ -192,6 +197,22 @@ class RoomSummaryHandler: errcode=Codes.NOT_JOINED, ) + if not local_room: + room_hierarchy = await self._summarize_remote_room_hierarchy( + _RoomQueueEntry(requested_room_id, ()), + False, + ) + root_room_entry = room_hierarchy[0] + if not root_room_entry or not await self._is_remote_room_accessible( + requester, requested_room_id, root_room_entry.room + ): + raise UnstableSpecAuthError( + 403, + "User %s not in room %s, and room previews are disabled" + % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, + ) + # If this is continuing a previous session, pull the persisted data. if from_token: try: diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 244a4e7689..b55fa1a8fd 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -757,6 +757,54 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): ) self._assert_hierarchy(result, expected) + def test_fed_root(self) -> None: + """ + Test if requested room is available over federation. + """ + fed_hostname = self.hs.hostname + "2" + fed_space = "#fed_space:" + fed_hostname + fed_subroom = "#fed_sub_room:" + fed_hostname + + requested_room_entry = _RoomEntry( + fed_space, + { + "room_id": fed_space, + "world_readable": True, + "room_type": RoomTypes.SPACE, + }, + [ + { + "type": EventTypes.SpaceChild, + "room_id": fed_space, + "state_key": fed_subroom, + "content": {"via": [fed_hostname]}, + } + ], + ) + child_room = { + "room_id": fed_subroom, + "world_readable": True, + } + + async def summarize_remote_room_hierarchy( + _self: Any, room: Any, suggested_only: bool + ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + return requested_room_entry, {fed_subroom: child_room}, set() + + expected = [ + (fed_space, [fed_subroom]), + (fed_subroom, ()), + ] + + with mock.patch( + "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", + new=summarize_remote_room_hierarchy, + ): + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), fed_space) + ) + self._assert_hierarchy(result, expected) + def test_fed_filtering(self) -> None: """ Rooms returned over federation should be properly filtered to only include From 689641b903e17cb9dc80e273d25c88a235b02fbe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Aug 2024 18:42:19 +0100 Subject: [PATCH 161/332] Sliding sync: factor out room list logic (#17622) Move calculating of the room lists out of the core handler. This should make it easier to switch things around to start using the tables in #17512. This is just moving code between files and methods. 
Reviewable commit-by-commit --- changelog.d/17622.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 1287 +----------------- synapse/handlers/sliding_sync/extensions.py | 25 +- synapse/handlers/sliding_sync/room_lists.py | 1353 +++++++++++++++++++ synapse/rest/client/sync.py | 4 +- synapse/types/handlers/__init__.py | 2 +- tests/handlers/test_sliding_sync.py | 152 +-- 7 files changed, 1468 insertions(+), 1356 deletions(-) create mode 100644 changelog.d/17622.misc create mode 100644 synapse/handlers/sliding_sync/room_lists.py diff --git a/changelog.d/17622.misc b/changelog.d/17622.misc new file mode 100644 index 0000000000..af064f7e13 --- /dev/null +++ b/changelog.d/17622.misc @@ -0,0 +1 @@ +Refactor sliding sync code to move room list logic out into a separate class. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 1fcf2d149b..ccd464cd1c 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -12,38 +12,22 @@ # . # -import enum import logging from itertools import chain -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Literal, - Mapping, - Optional, - Set, - Tuple, - Union, -) +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, Tuple -import attr -from immutabledict import immutabledict from prometheus_client import Histogram from typing_extensions import assert_never -from synapse.api.constants import ( - AccountDataTypes, - Direction, - EventContentFields, - EventTypes, - Membership, -) -from synapse.events import EventBase, StrippedStateEvent -from synapse.events.utils import parse_stripped_state_event, strip_event +from synapse.api.constants import Direction, EventTypes, Membership +from synapse.events import EventBase +from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler +from synapse.handlers.sliding_sync.room_lists import ( + SlidingSyncRoomLists, + _RoomMembershipForUser, +) from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore from synapse.handlers.sliding_sync.types import ( HaveSentRoomFlag, @@ -61,29 +45,19 @@ from synapse.logging.opentracing import ( trace, ) from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary -from synapse.storage.databases.main.state import ( - ROOM_UNKNOWN_SENTINEL, - Sentinel as StateSentinel, -) -from synapse.storage.databases.main.stream import ( - CurrentStateDeltaMembership, - PaginateFunction, -) +from synapse.storage.databases.main.stream import PaginateFunction from synapse.storage.roommember import MemberSummary from synapse.types import ( JsonDict, - MutableStateMap, PersistedEventPosition, Requester, RoomStreamToken, SlidingSyncStreamToken, StateMap, - StrCollection, StreamKeyType, StreamToken, - UserID, ) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers import SlidingSyncConfig, SlidingSyncResult from synapse.types.state import StateFilter from synapse.util.async_helpers import concurrently_execute from synapse.visibility import filter_events_for_client @@ -101,12 +75,6 @@ sync_processing_time = Histogram( ) -class Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup and subsequent type narrowing. 
- UNSET_SENTINEL = object() - - # The event types that clients should consider as new activity. DEFAULT_BUMP_EVENT_TYPES = { EventTypes.Create, @@ -119,81 +87,6 @@ DEFAULT_BUMP_EVENT_TYPES = { } -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _RoomMembershipForUser: - """ - Attributes: - room_id: The room ID of the membership event - event_id: The event ID of the membership event - event_pos: The stream position of the membership event - membership: The membership state of the user in the room - sender: The person who sent the membership event - newly_joined: Whether the user newly joined the room during the given token - range and is still joined to the room at the end of this range. - newly_left: Whether the user newly left (or kicked) the room during the given - token range and is still "leave" at the end of this range. - is_dm: Whether this user considers this room as a direct-message (DM) room - """ - - room_id: str - # Optional because state resets can affect room membership without a corresponding event. - event_id: Optional[str] - # Even during a state reset which removes the user from the room, we expect this to - # be set because `current_state_delta_stream` will note the position that the reset - # happened. - event_pos: PersistedEventPosition - # Even during a state reset which removes the user from the room, we expect this to - # be set to `LEAVE` because we can make that assumption based on the situaton (see - # `get_current_state_delta_membership_changes_for_user(...)`) - membership: str - # Optional because state resets can affect room membership without a corresponding event. - sender: Optional[str] - newly_joined: bool - newly_left: bool - is_dm: bool - - def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": - return attr.evolve(self, **kwds) - - -def filter_membership_for_sync( - *, user_id: str, room_membership_for_user: _RoomMembershipForUser -) -> bool: - """ - Returns True if the membership event should be included in the sync response, - otherwise False. - - Attributes: - user_id: The user ID that the membership applies to - room_membership_for_user: Membership information for the user in the room - """ - - membership = room_membership_for_user.membership - sender = room_membership_for_user.sender - newly_left = room_membership_for_user.newly_left - - # We want to allow everything except rooms the user has left unless `newly_left` - # because we want everything that's *still* relevant to the user. We include - # `newly_left` rooms because the last event that the user should see is their own - # leave event. - # - # A leave != kick. This logic includes kicks (leave events where the sender is not - # the same user). - # - # When `sender=None`, it means that a state reset happened that removed the user - # from the room without a corresponding leave event. We can just remove the rooms - # since they are no longer relevant to the user but will still appear if they are - # `newly_left`. - return ( - # Anything except leave events - membership != Membership.LEAVE - # Unless... 
- or newly_left - # Allow kicks - or (membership == Membership.LEAVE and sender not in (user_id, None)) - ) - - class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @@ -208,6 +101,7 @@ class SlidingSyncHandler: self.connection_store = SlidingSyncConnectionStore() self.extensions = SlidingSyncExtensionHandler(hs) + self.room_lists = SlidingSyncRoomLists(hs) async def wait_for_sync_for_user( self, @@ -343,232 +237,23 @@ class SlidingSyncHandler: sync_config.room_subscriptions is not None and len(sync_config.room_subscriptions) > 0 ) - if has_lists or has_room_subscriptions: - room_membership_for_user_map = ( - await self.get_room_membership_for_user_at_to_token( - user=sync_config.user, - to_token=to_token, - from_token=from_token.stream_token if from_token else None, - ) - ) - # Assemble sliding window lists - lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} - # Keep track of the rooms that we can display and need to fetch more info about - relevant_room_map: Dict[str, RoomSyncConfig] = {} - # The set of room IDs of all rooms that could appear in any list. These - # include rooms that are outside the list ranges. - all_rooms: Set[str] = set() - if has_lists and sync_config.lists is not None: - with start_active_span("assemble_sliding_window_lists"): - sync_room_map = await self.filter_rooms_relevant_for_sync( - user=sync_config.user, - room_membership_for_user_map=room_membership_for_user_map, - ) + interested_rooms = await self.room_lists.compute_interested_rooms( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + from_token=from_token.stream_token if from_token else None, + to_token=to_token, + ) - for list_key, list_config in sync_config.lists.items(): - # Apply filters - filtered_sync_room_map = sync_room_map - if list_config.filters is not None: - filtered_sync_room_map = await self.filter_rooms( - sync_config.user, - sync_room_map, - list_config.filters, - to_token, - ) - - # Find which rooms are partially stated and may need to be filtered out - # depending on the `required_state` requested (see below). - partial_state_room_map = ( - await self.store.is_partial_state_room_batched( - filtered_sync_room_map.keys() - ) - ) - - # Since creating the `RoomSyncConfig` takes some work, let's just do it - # once and make a copy whenever we need it. - room_sync_config = RoomSyncConfig.from_room_config(list_config) - - # Exclude partially-stated rooms if we must wait for the room to be - # fully-stated - if room_sync_config.must_await_full_state(self.is_mine_id): - filtered_sync_room_map = { - room_id: room - for room_id, room in filtered_sync_room_map.items() - if not partial_state_room_map.get(room_id) - } - - all_rooms.update(filtered_sync_room_map) - - # Sort the list - sorted_room_info = await self.sort_rooms( - filtered_sync_room_map, to_token - ) - - ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] - if list_config.ranges: - for range in list_config.ranges: - room_ids_in_list: List[str] = [] - - # We're going to loop through the sorted list of rooms starting - # at the range start index and keep adding rooms until we fill - # up the range or run out of rooms. - # - # Both sides of range are inclusive so we `+ 1` - max_num_rooms = range[1] - range[0] + 1 - for room_membership in sorted_room_info[range[0] :]: - room_id = room_membership.room_id - - if len(room_ids_in_list) >= max_num_rooms: - break - - # Take the superset of the `RoomSyncConfig` for each room. 
- # - # Update our `relevant_room_map` with the room we're going - # to display and need to fetch more info about. - existing_room_sync_config = relevant_room_map.get( - room_id - ) - if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( - room_sync_config - ) - else: - # Make a copy so if we modify it later, it doesn't - # affect all references. - relevant_room_map[room_id] = ( - room_sync_config.deep_copy() - ) - - room_ids_in_list.append(room_id) - - ops.append( - SlidingSyncResult.SlidingWindowList.Operation( - op=OperationType.SYNC, - range=range, - room_ids=room_ids_in_list, - ) - ) - - lists[list_key] = SlidingSyncResult.SlidingWindowList( - count=len(sorted_room_info), - ops=ops, - ) - - # Handle room subscriptions - if has_room_subscriptions and sync_config.room_subscriptions is not None: - with start_active_span("assemble_room_subscriptions"): - # Find which rooms are partially stated and may need to be filtered out - # depending on the `required_state` requested (see below). - partial_state_room_map = await self.store.is_partial_state_room_batched( - sync_config.room_subscriptions.keys() - ) - - for ( - room_id, - room_subscription, - ) in sync_config.room_subscriptions.items(): - room_membership_for_user_at_to_token = ( - await self.check_room_subscription_allowed_for_user( - room_id=room_id, - room_membership_for_user_map=room_membership_for_user_map, - to_token=to_token, - ) - ) - - # Skip this room if the user isn't allowed to see it - if not room_membership_for_user_at_to_token: - continue - - all_rooms.add(room_id) - - room_membership_for_user_map[room_id] = ( - room_membership_for_user_at_to_token - ) - - # Take the superset of the `RoomSyncConfig` for each room. - room_sync_config = RoomSyncConfig.from_room_config( - room_subscription - ) - - # Exclude partially-stated rooms if we must wait for the room to be - # fully-stated - if room_sync_config.must_await_full_state(self.is_mine_id): - if partial_state_room_map.get(room_id): - continue - - all_rooms.add(room_id) - - # Update our `relevant_room_map` with the room we're going to display - # and need to fetch more info about. - existing_room_sync_config = relevant_room_map.get(room_id) - if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( - room_sync_config - ) - else: - relevant_room_map[room_id] = room_sync_config + lists = interested_rooms.lists + relevant_room_map = interested_rooms.relevant_room_map + all_rooms = interested_rooms.all_rooms + room_membership_for_user_map = interested_rooms.room_membership_for_user_map + relevant_rooms_to_send_map = interested_rooms.relevant_rooms_to_send_map # Fetch room data rooms: Dict[str, SlidingSyncResult.RoomResult] = {} - # Filter out rooms that haven't received updates and we've sent down - # previously. - # Keep track of the rooms that we're going to display and need to fetch more info about - relevant_rooms_to_send_map = relevant_room_map - with start_active_span("filter_relevant_rooms_to_send"): - if from_token: - rooms_should_send = set() - - # First we check if there are rooms that match a list/room - # subscription and have updates we need to send (i.e. either because - # we haven't sent the room down, or we have but there are missing - # updates). - for room_id, room_config in relevant_room_map.items(): - prev_room_sync_config = previous_connection_state.room_configs.get( - room_id - ) - if prev_room_sync_config is not None: - # Always include rooms whose timeline limit has increased. 
- # (see the "XXX: Odd behavior" described below) - if ( - prev_room_sync_config.timeline_limit - < room_config.timeline_limit - ): - rooms_should_send.add(room_id) - continue - - status = previous_connection_state.rooms.have_sent_room(room_id) - if ( - # The room was never sent down before so the client needs to know - # about it regardless of any updates. - status.status == HaveSentRoomFlag.NEVER - # `PREVIOUSLY` literally means the "room was sent down before *AND* - # there are updates we haven't sent down" so we already know this - # room has updates. - or status.status == HaveSentRoomFlag.PREVIOUSLY - ): - rooms_should_send.add(room_id) - elif status.status == HaveSentRoomFlag.LIVE: - # We know that we've sent all updates up until `from_token`, - # so we just need to check if there have been updates since - # then. - pass - else: - assert_never(status.status) - - # We only need to check for new events since any state changes - # will also come down as new events. - rooms_that_have_updates = self.store.get_rooms_that_might_have_updates( - relevant_room_map.keys(), from_token.stream_token.room_key - ) - rooms_should_send.update(rooms_that_have_updates) - relevant_rooms_to_send_map = { - room_id: room_sync_config - for room_id, room_sync_config in relevant_room_map.items() - if room_id in rooms_should_send - } - new_connection_state = previous_connection_state.get_mutable() @trace @@ -678,934 +363,6 @@ class SlidingSyncHandler: return sliding_sync_result - @trace - async def get_room_membership_for_user_at_to_token( - self, - user: UserID, - to_token: StreamToken, - from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: - """ - Fetch room IDs that the user has had membership in (the full room list including - long-lost left rooms that will be filtered, sorted, and sliced). - - We're looking for rooms where the user has had any sort of membership in the - token range (> `from_token` and <= `to_token`) - - In order for bans/kicks to not show up, you need to `/forget` those rooms. This - doesn't modify the event itself though and only adds the `forgotten` flag to the - `room_memberships` table in Synapse. There isn't a way to tell when a room was - forgotten at the moment so we can't factor it into the token range. - - Args: - user: User to fetch rooms for - to_token: The token to fetch rooms up to. - from_token: The point in the stream to sync from. - - Returns: - A dictionary of room IDs that the user has had membership in along with - membership information in that room at the time of `to_token`. - """ - user_id = user.to_string() - - # First grab a current snapshot rooms for the user - # (also handles forgotten rooms) - room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is( - user_id=user_id, - # We want to fetch any kind of membership (joined and left rooms) in order - # to get the `event_pos` of the latest room membership event for the - # user. - membership_list=Membership.LIST, - excluded_rooms=self.rooms_to_exclude_globally, - ) - - # If the user has never joined any rooms before, we can just return an empty list - if not room_for_user_list: - return {} - - # Our working list of rooms that can show up in the sync response - sync_room_id_set = { - # Note: The `room_for_user` we're assigning here will need to be fixed up - # (below) because they are potentially from the current snapshot time - # instead from the time of the `to_token`. 
- room_for_user.room_id: _RoomMembershipForUser( - room_id=room_for_user.room_id, - event_id=room_for_user.event_id, - event_pos=room_for_user.event_pos, - membership=room_for_user.membership, - sender=room_for_user.sender, - # We will update these fields below to be accurate - newly_joined=False, - newly_left=False, - is_dm=False, - ) - for room_for_user in room_for_user_list - } - - # Get the `RoomStreamToken` that represents the spot we queried up to when we got - # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`. - # - # First, we need to get the max stream_ordering of each event persister instance - # that we queried events from. - instance_to_max_stream_ordering_map: Dict[str, int] = {} - for room_for_user in room_for_user_list: - instance_name = room_for_user.event_pos.instance_name - stream_ordering = room_for_user.event_pos.stream - - current_instance_max_stream_ordering = ( - instance_to_max_stream_ordering_map.get(instance_name) - ) - if ( - current_instance_max_stream_ordering is None - or stream_ordering > current_instance_max_stream_ordering - ): - instance_to_max_stream_ordering_map[instance_name] = stream_ordering - - # Then assemble the `RoomStreamToken` - min_stream_pos = min(instance_to_max_stream_ordering_map.values()) - membership_snapshot_token = RoomStreamToken( - # Minimum position in the `instance_map` - stream=min_stream_pos, - instance_map=immutabledict( - { - instance_name: stream_pos - for instance_name, stream_pos in instance_to_max_stream_ordering_map.items() - if stream_pos > min_stream_pos - } - ), - ) - - # Since we fetched the users room list at some point in time after the from/to - # tokens, we need to revert/rewind some membership changes to match the point in - # time of the `to_token`. In particular, we need to make these fixups: - # - # - 1a) Remove rooms that the user joined after the `to_token` - # - 1b) Add back rooms that the user left after the `to_token` - # - 1c) Update room membership events to the point in time of the `to_token` - # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`) - # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`) - # - 4) Figure out which rooms are DM's - - # 1) Fetch membership changes that fall in the range from `to_token` up to - # `membership_snapshot_token` - # - # If our `to_token` is already the same or ahead of the latest room membership - # for the user, we don't need to do any "2)" fix-ups and can just straight-up - # use the room list from the snapshot as a base (nothing has changed) - current_state_delta_membership_changes_after_to_token = [] - if not membership_snapshot_token.is_before_or_eq(to_token.room_key): - current_state_delta_membership_changes_after_to_token = ( - await self.store.get_current_state_delta_membership_changes_for_user( - user_id, - from_key=to_token.room_key, - to_key=membership_snapshot_token, - excluded_room_ids=self.rooms_to_exclude_globally, - ) - ) - - # 1) Assemble a list of the first membership event after the `to_token` so we can - # step backward to the previous membership that would apply to the from/to - # range. 
- first_membership_change_by_room_id_after_to_token: Dict[ - str, CurrentStateDeltaMembership - ] = {} - for membership_change in current_state_delta_membership_changes_after_to_token: - # Only set if we haven't already set it - first_membership_change_by_room_id_after_to_token.setdefault( - membership_change.room_id, membership_change - ) - - # 1) Fixup - # - # Since we fetched a snapshot of the users room list at some point in time after - # the from/to tokens, we need to revert/rewind some membership changes to match - # the point in time of the `to_token`. - for ( - room_id, - first_membership_change_after_to_token, - ) in first_membership_change_by_room_id_after_to_token.items(): - # 1a) Remove rooms that the user joined after the `to_token` - if first_membership_change_after_to_token.prev_event_id is None: - sync_room_id_set.pop(room_id, None) - # 1b) 1c) From the first membership event after the `to_token`, step backward to the - # previous membership that would apply to the from/to range. - else: - # We don't expect these fields to be `None` if we have a `prev_event_id` - # but we're being defensive since it's possible that the prev event was - # culled from the database. - if ( - first_membership_change_after_to_token.prev_event_pos is not None - and first_membership_change_after_to_token.prev_membership - is not None - ): - sync_room_id_set[room_id] = _RoomMembershipForUser( - room_id=room_id, - event_id=first_membership_change_after_to_token.prev_event_id, - event_pos=first_membership_change_after_to_token.prev_event_pos, - membership=first_membership_change_after_to_token.prev_membership, - sender=first_membership_change_after_to_token.prev_sender, - # We will update these fields below to be accurate - newly_joined=False, - newly_left=False, - is_dm=False, - ) - else: - # If we can't find the previous membership event, we shouldn't - # include the room in the sync response since we can't determine the - # exact membership state and shouldn't rely on the current snapshot. - sync_room_id_set.pop(room_id, None) - - # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token` - current_state_delta_membership_changes_in_from_to_range = [] - if from_token: - current_state_delta_membership_changes_in_from_to_range = ( - await self.store.get_current_state_delta_membership_changes_for_user( - user_id, - from_key=from_token.room_key, - to_key=to_token.room_key, - excluded_room_ids=self.rooms_to_exclude_globally, - ) - ) - - # 2) Assemble a list of the last membership events in some given ranges. Someone - # could have left and joined multiple times during the given range but we only - # care about end-result so we grab the last one. - last_membership_change_by_room_id_in_from_to_range: Dict[ - str, CurrentStateDeltaMembership - ] = {} - # We also want to assemble a list of the first membership events during the token - # range so we can step backward to the previous membership that would apply to - # before the token range to see if we have `newly_joined` the room. - first_membership_change_by_room_id_in_from_to_range: Dict[ - str, CurrentStateDeltaMembership - ] = {} - # Keep track if the room has a non-join event in the token range so we can later - # tell if it was a `newly_joined` room. If the last membership event in the - # token range is a join and there is also some non-join in the range, we know - # they `newly_joined`. 
- has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {} - for ( - membership_change - ) in current_state_delta_membership_changes_in_from_to_range: - room_id = membership_change.room_id - - last_membership_change_by_room_id_in_from_to_range[room_id] = ( - membership_change - ) - # Only set if we haven't already set it - first_membership_change_by_room_id_in_from_to_range.setdefault( - room_id, membership_change - ) - - if membership_change.membership != Membership.JOIN: - has_non_join_event_by_room_id_in_from_to_range[room_id] = True - - # 2) Fixup - # - # 3) We also want to assemble a list of possibly newly joined rooms. Someone - # could have left and joined multiple times during the given range but we only - # care about whether they are joined at the end of the token range so we are - # working with the last membership even in the token range. - possibly_newly_joined_room_ids = set() - for ( - last_membership_change_in_from_to_range - ) in last_membership_change_by_room_id_in_from_to_range.values(): - room_id = last_membership_change_in_from_to_range.room_id - - # 3) - if last_membership_change_in_from_to_range.membership == Membership.JOIN: - possibly_newly_joined_room_ids.add(room_id) - - # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`). - if last_membership_change_in_from_to_range.membership == Membership.LEAVE: - # 2) Mark this room as `newly_left` - - # If we're seeing a membership change here, we should expect to already - # have it in our snapshot but if a state reset happens, it wouldn't have - # shown up in our snapshot but appear as a change here. - existing_sync_entry = sync_room_id_set.get(room_id) - if existing_sync_entry is not None: - # Normal expected case - sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace( - newly_left=True - ) - else: - # State reset! - logger.warn( - "State reset detected for room_id %s with %s who is no longer in the room", - room_id, - user_id, - ) - # Even though a state reset happened which removed the person from - # the room, we still add it the list so the user knows they left the - # room. Downstream code can check for a state reset by looking for - # `event_id=None and membership is not None`. - sync_room_id_set[room_id] = _RoomMembershipForUser( - room_id=room_id, - event_id=last_membership_change_in_from_to_range.event_id, - event_pos=last_membership_change_in_from_to_range.event_pos, - membership=last_membership_change_in_from_to_range.membership, - sender=last_membership_change_in_from_to_range.sender, - newly_joined=False, - newly_left=True, - is_dm=False, - ) - - # 3) Figure out `newly_joined` - for room_id in possibly_newly_joined_room_ids: - has_non_join_in_from_to_range = ( - has_non_join_event_by_room_id_in_from_to_range.get(room_id, False) - ) - # If the last membership event in the token range is a join and there is - # also some non-join in the range, we know they `newly_joined`. 
- if has_non_join_in_from_to_range: - # We found a `newly_joined` room (we left and joined within the token range) - sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( - newly_joined=True - ) - else: - prev_event_id = first_membership_change_by_room_id_in_from_to_range[ - room_id - ].prev_event_id - prev_membership = first_membership_change_by_room_id_in_from_to_range[ - room_id - ].prev_membership - - if prev_event_id is None: - # We found a `newly_joined` room (we are joining the room for the - # first time within the token range) - sync_room_id_set[room_id] = sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) - # Last resort, we need to step back to the previous membership event - # just before the token range to see if we're joined then or not. - elif prev_membership != Membership.JOIN: - # We found a `newly_joined` room (we left before the token range - # and joined within the token range) - sync_room_id_set[room_id] = sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) - - # 4) Figure out which rooms the user considers to be direct-message (DM) rooms - # - # We're using global account data (`m.direct`) instead of checking for - # `is_direct` on membership events because that property only appears for - # the invitee membership event (doesn't show up for the inviter). - # - # We're unable to take `to_token` into account for global account data since - # we only keep track of the latest account data for the user. - dm_map = await self.store.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.DIRECT - ) - - # Flatten out the map. Account data is set by the client so it needs to be - # scrutinized. - dm_room_id_set = set() - if isinstance(dm_map, dict): - for room_ids in dm_map.values(): - # Account data should be a list of room IDs. Ignore anything else - if isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - dm_room_id_set.add(room_id) - - # 4) Fixup - for room_id in sync_room_id_set: - sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( - is_dm=room_id in dm_room_id_set - ) - - return sync_room_id_set - - @trace - async def filter_rooms_relevant_for_sync( - self, - user: UserID, - room_membership_for_user_map: Dict[str, _RoomMembershipForUser], - ) -> Dict[str, _RoomMembershipForUser]: - """ - Filter room IDs that should/can be listed for this user in the sync response (the - full room list that will be further filtered, sorted, and sliced). - - We're looking for rooms where the user has the following state in the token - range (> `from_token` and <= `to_token`): - - - `invite`, `join`, `knock`, `ban` membership events - - Kicks (`leave` membership events where `sender` is different from the - `user_id`/`state_key`) - - `newly_left` (rooms that were left during the given token range) - - In order for bans/kicks to not show up in sync, you need to `/forget` those - rooms. This doesn't modify the event itself though and only adds the - `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way - to tell when a room was forgotten at the moment so we can't factor it into the - from/to range. - - Args: - user: User that is syncing - room_membership_for_user_map: Room membership for the user - - Returns: - A dictionary of room IDs that should be listed in the sync response along - with membership information in that room at the time of `to_token`. 
- """ - user_id = user.to_string() - - # Filter rooms to only what we're interested to sync with - filtered_sync_room_map = { - room_id: room_membership_for_user - for room_id, room_membership_for_user in room_membership_for_user_map.items() - if filter_membership_for_sync( - user_id=user_id, - room_membership_for_user=room_membership_for_user, - ) - } - - return filtered_sync_room_map - - async def check_room_subscription_allowed_for_user( - self, - room_id: str, - room_membership_for_user_map: Dict[str, _RoomMembershipForUser], - to_token: StreamToken, - ) -> Optional[_RoomMembershipForUser]: - """ - Check whether the user is allowed to see the room based on whether they have - ever had membership in the room or if the room is `world_readable`. - - Similar to `check_user_in_room_or_world_readable(...)` - - Args: - room_id: Room to check - room_membership_for_user_map: Room membership for the user at the time of - the `to_token` (<= `to_token`). - to_token: The token to fetch rooms up to. - - Returns: - The room membership for the user if they are allowed to subscribe to the - room else `None`. - """ - - # We can first check if they are already allowed to see the room based - # on our previous work to assemble the `room_membership_for_user_map`. - # - # If they have had any membership in the room over time (up to the `to_token`), - # let them subscribe and see what they can. - existing_membership_for_user = room_membership_for_user_map.get(room_id) - if existing_membership_for_user is not None: - return existing_membership_for_user - - # TODO: Handle `world_readable` rooms - return None - - # If the room is `world_readable`, it doesn't matter whether they can join, - # everyone can see the room. - # not_in_room_membership_for_user = _RoomMembershipForUser( - # room_id=room_id, - # event_id=None, - # event_pos=None, - # membership=None, - # sender=None, - # newly_joined=False, - # newly_left=False, - # is_dm=False, - # ) - # room_state = await self.get_current_state_at( - # room_id=room_id, - # room_membership_for_user_at_to_token=not_in_room_membership_for_user, - # state_filter=StateFilter.from_types( - # [(EventTypes.RoomHistoryVisibility, "")] - # ), - # to_token=to_token, - # ) - - # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) - # if ( - # visibility_event is not None - # and visibility_event.content.get("history_visibility") - # == HistoryVisibility.WORLD_READABLE - # ): - # return not_in_room_membership_for_user - - # return None - - @trace - async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( - self, - room_ids: StrCollection, - sync_room_map: Dict[str, _RoomMembershipForUser], - ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: - """ - Fetch stripped state for a list of room IDs. Stripped state is only - applicable to invite/knock rooms. Other rooms will have `None` as their - stripped state. - - For invite rooms, we pull from `unsigned.invite_room_state`. - For knock rooms, we pull from `unsigned.knock_room_state`. - - Args: - room_ids: Room IDs to fetch stripped state for - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - - Returns: - Mapping from room_id to mapping of (type, state_key) to stripped state - event. 
- """ - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ] = {} - - # Fetch what we haven't before - room_ids_to_fetch = [ - room_id - for room_id in room_ids - if room_id not in room_id_to_stripped_state_map - ] - - # Gather a list of event IDs we can grab stripped state from - invite_or_knock_event_ids: List[str] = [] - for room_id in room_ids_to_fetch: - if sync_room_map[room_id].membership in ( - Membership.INVITE, - Membership.KNOCK, - ): - event_id = sync_room_map[room_id].event_id - # If this is an invite/knock then there should be an event_id - assert event_id is not None - invite_or_knock_event_ids.append(event_id) - else: - room_id_to_stripped_state_map[room_id] = None - - invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids) - for invite_or_knock_event in invite_or_knock_events.values(): - room_id = invite_or_knock_event.room_id - membership = invite_or_knock_event.membership - - raw_stripped_state_events = None - if membership == Membership.INVITE: - invite_room_state = invite_or_knock_event.unsigned.get( - "invite_room_state" - ) - raw_stripped_state_events = invite_room_state - elif membership == Membership.KNOCK: - knock_room_state = invite_or_knock_event.unsigned.get( - "knock_room_state" - ) - raw_stripped_state_events = knock_room_state - else: - raise AssertionError( - f"Unexpected membership {membership} (this is a problem with Synapse itself)" - ) - - stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None - # Scrutinize unsigned things. `raw_stripped_state_events` should be a list - # of stripped events - if raw_stripped_state_events is not None: - stripped_state_map = {} - if isinstance(raw_stripped_state_events, list): - for raw_stripped_event in raw_stripped_state_events: - stripped_state_event = parse_stripped_state_event( - raw_stripped_event - ) - if stripped_state_event is not None: - stripped_state_map[ - ( - stripped_state_event.type, - stripped_state_event.state_key, - ) - ] = stripped_state_event - - room_id_to_stripped_state_map[room_id] = stripped_state_map - - return room_id_to_stripped_state_map - - @trace - async def _bulk_get_partial_current_state_content_for_rooms( - self, - content_type: Literal[ - # `content.type` from `EventTypes.Create`` - "room_type", - # `content.algorithm` from `EventTypes.RoomEncryption` - "room_encryption", - ], - room_ids: Set[str], - sync_room_map: Dict[str, _RoomMembershipForUser], - to_token: StreamToken, - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ], - ) -> Mapping[str, Union[Optional[str], StateSentinel]]: - """ - Get the given state event content for a list of rooms. First we check the - current state of the room, then fallback to stripped state if available, then - historical state. - - Args: - content_type: Which content to grab - room_ids: Room IDs to fetch the given content field for. - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - to_token: We filter based on the state of the room at this token - room_id_to_stripped_state_map: This does not need to be filled in before - calling this function. Mapping from room_id to mapping of (type, state_key) - to stripped state event. Modified in place when we fetch new rooms so we can - save work next time this function is called. - - Returns: - A mapping from room ID to the state event content if the room has - the given state event (event_type, ""), otherwise `None`. 
Rooms unknown to - this server will return `ROOM_UNKNOWN_SENTINEL`. - """ - room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {} - - # As a bulk shortcut, use the current state if the server is particpating in the - # room (meaning we have current state). Ideally, for leave/ban rooms, we would - # want the state at the time of the membership instead of current state to not - # leak anything but we consider the create/encryption stripped state events to - # not be a secret given they are often set at the start of the room and they are - # normally handed out on invite/knock. - # - # Be mindful to only use this for non-sensitive details. For example, even - # though the room name/avatar/topic are also stripped state, they seem a lot - # more senstive to leak the current state value of. - # - # Since this function is cached, we need to make a mutable copy via - # `dict(...)`. - event_type = "" - event_content_field = "" - if content_type == "room_type": - event_type = EventTypes.Create - event_content_field = EventContentFields.ROOM_TYPE - room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids)) - elif content_type == "room_encryption": - event_type = EventTypes.RoomEncryption - event_content_field = EventContentFields.ENCRYPTION_ALGORITHM - room_id_to_content = dict( - await self.store.bulk_get_room_encryption(room_ids) - ) - else: - assert_never(content_type) - - room_ids_with_results = [ - room_id - for room_id, content_field in room_id_to_content.items() - if content_field is not ROOM_UNKNOWN_SENTINEL - ] - - # We might not have current room state for remote invite/knocks if we are - # the first person on our server to see the room. The best we can do is look - # in the optional stripped state from the invite/knock event. - room_ids_without_results = room_ids.difference( - chain( - room_ids_with_results, - [ - room_id - for room_id, stripped_state_map in room_id_to_stripped_state_map.items() - if stripped_state_map is not None - ], - ) - ) - room_id_to_stripped_state_map.update( - await self._bulk_get_stripped_state_for_rooms_from_sync_room_map( - room_ids_without_results, sync_room_map - ) - ) - - # Update our `room_id_to_content` map based on the stripped state - # (applies to invite/knock rooms) - rooms_ids_without_stripped_state: Set[str] = set() - for room_id in room_ids_without_results: - stripped_state_map = room_id_to_stripped_state_map.get( - room_id, Sentinel.UNSET_SENTINEL - ) - assert stripped_state_map is not Sentinel.UNSET_SENTINEL, ( - f"Stripped state left unset for room {room_id}. " - + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` " - + "with that room_id. (this is a problem with Synapse itself)" - ) - - # If there is some stripped state, we assume the remote server passed *all* - # of the potential stripped state events for the room. - if stripped_state_map is not None: - create_stripped_event = stripped_state_map.get((EventTypes.Create, "")) - stripped_event = stripped_state_map.get((event_type, "")) - # Sanity check that we at-least have the create event - if create_stripped_event is not None: - if stripped_event is not None: - room_id_to_content[room_id] = stripped_event.content.get( - event_content_field - ) - else: - # Didn't see the state event we're looking for in the stripped - # state so we can assume relevant content field is `None`. 
- room_id_to_content[room_id] = None - else: - rooms_ids_without_stripped_state.add(room_id) - - # Last resort, we might not have current room state for rooms that the - # server has left (no one local is in the room) but we can look at the - # historical state. - # - # Update our `room_id_to_content` map based on the state at the time of - # the membership event. - for room_id in rooms_ids_without_stripped_state: - # TODO: It would be nice to look this up in a bulk way (N+1 queries) - # - # TODO: `get_state_at(...)` doesn't take into account the "current state". - room_state = await self.storage_controllers.state.get_state_at( - room_id=room_id, - stream_position=to_token.copy_and_replace( - StreamKeyType.ROOM, - sync_room_map[room_id].event_pos.to_room_stream_token(), - ), - state_filter=StateFilter.from_types( - [ - (EventTypes.Create, ""), - (event_type, ""), - ] - ), - # Partially-stated rooms should have all state events except for - # remote membership events so we don't need to wait at all because - # we only want the create event and some non-member event. - await_full_state=False, - ) - # We can use the create event as a canary to tell whether the server has - # seen the room before - create_event = room_state.get((EventTypes.Create, "")) - state_event = room_state.get((event_type, "")) - - if create_event is None: - # Skip for unknown rooms - continue - - if state_event is not None: - room_id_to_content[room_id] = state_event.content.get( - event_content_field - ) - else: - # Didn't see the state event we're looking for in the stripped - # state so we can assume relevant content field is `None`. - room_id_to_content[room_id] = None - - return room_id_to_content - - @trace - async def filter_rooms( - self, - user: UserID, - sync_room_map: Dict[str, _RoomMembershipForUser], - filters: SlidingSyncConfig.SlidingSyncList.Filters, - to_token: StreamToken, - ) -> Dict[str, _RoomMembershipForUser]: - """ - Filter rooms based on the sync request. - - Args: - user: User to filter rooms for - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - filters: Filters to apply - to_token: We filter based on the state of the room at this token - - Returns: - A filtered dictionary of room IDs along with membership information in the - room at the time of `to_token`. 
- """ - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ] = {} - - filtered_room_id_set = set(sync_room_map.keys()) - - # Filter for Direct-Message (DM) rooms - if filters.is_dm is not None: - with start_active_span("filters.is_dm"): - if filters.is_dm: - # Only DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if sync_room_map[room_id].is_dm - } - else: - # Only non-DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if not sync_room_map[room_id].is_dm - } - - if filters.spaces is not None: - with start_active_span("filters.spaces"): - raise NotImplementedError() - - # Filter for encrypted rooms - if filters.is_encrypted is not None: - with start_active_span("filters.is_encrypted"): - room_id_to_encryption = ( - await self._bulk_get_partial_current_state_content_for_rooms( - content_type="room_encryption", - room_ids=filtered_room_id_set, - to_token=to_token, - sync_room_map=sync_room_map, - room_id_to_stripped_state_map=room_id_to_stripped_state_map, - ) - ) - - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - encryption = room_id_to_encryption.get( - room_id, ROOM_UNKNOWN_SENTINEL - ) - - # Just remove rooms if we can't determine their encryption status - if encryption is ROOM_UNKNOWN_SENTINEL: - filtered_room_id_set.remove(room_id) - continue - - # If we're looking for encrypted rooms, filter out rooms that are not - # encrypted and vice versa - is_encrypted = encryption is not None - if (filters.is_encrypted and not is_encrypted) or ( - not filters.is_encrypted and is_encrypted - ): - filtered_room_id_set.remove(room_id) - - # Filter for rooms that the user has been invited to - if filters.is_invite is not None: - with start_active_span("filters.is_invite"): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - room_for_user = sync_room_map[room_id] - # If we're looking for invite rooms, filter out rooms that the user is - # not invited to and vice versa - if ( - filters.is_invite - and room_for_user.membership != Membership.INVITE - ) or ( - not filters.is_invite - and room_for_user.membership == Membership.INVITE - ): - filtered_room_id_set.remove(room_id) - - # Filter by room type (space vs room, etc). A room must match one of the types - # provided in the list. `None` is a valid type for rooms which do not have a - # room type. 
- if filters.room_types is not None or filters.not_room_types is not None: - with start_active_span("filters.room_types"): - room_id_to_type = ( - await self._bulk_get_partial_current_state_content_for_rooms( - content_type="room_type", - room_ids=filtered_room_id_set, - to_token=to_token, - sync_room_map=sync_room_map, - room_id_to_stripped_state_map=room_id_to_stripped_state_map, - ) - ) - - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) - - # Just remove rooms if we can't determine their type - if room_type is ROOM_UNKNOWN_SENTINEL: - filtered_room_id_set.remove(room_id) - continue - - if ( - filters.room_types is not None - and room_type not in filters.room_types - ): - filtered_room_id_set.remove(room_id) - - if ( - filters.not_room_types is not None - and room_type in filters.not_room_types - ): - filtered_room_id_set.remove(room_id) - - if filters.room_name_like is not None: - with start_active_span("filters.room_name_like"): - # TODO: The room name is a bit more sensitive to leak than the - # create/encryption event. Maybe we should consider a better way to fetch - # historical state before implementing this. - # - # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( - # content_type="room_name", - # room_ids=filtered_room_id_set, - # to_token=to_token, - # sync_room_map=sync_room_map, - # room_id_to_stripped_state_map=room_id_to_stripped_state_map, - # ) - raise NotImplementedError() - - if filters.tags is not None or filters.not_tags is not None: - with start_active_span("filters.tags"): - raise NotImplementedError() - - # Assemble a new sync room map but only with the `filtered_room_id_set` - return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} - - @trace - async def sort_rooms( - self, - sync_room_map: Dict[str, _RoomMembershipForUser], - to_token: StreamToken, - ) -> List[_RoomMembershipForUser]: - """ - Sort by `stream_ordering` of the last event that the user should see in the - room. `stream_ordering` is unique so we get a stable sort. - - Args: - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - to_token: We sort based on the events in the room at this token (<= `to_token`) - - Returns: - A sorted list of room IDs by `stream_ordering` along with membership information. - """ - - # Assemble a map of room ID to the `stream_ordering` of the last activity that the - # user should see in the room (<= `to_token`) - last_activity_in_room_map: Dict[str, int] = {} - - for room_id, room_for_user in sync_room_map.items(): - if room_for_user.membership != Membership.JOIN: - # If the user has left/been invited/knocked/been banned from a - # room, they shouldn't see anything past that point. - # - # FIXME: It's possible that people should see beyond this point - # in invited/knocked cases if for example the room has - # `invite`/`world_readable` history visibility, see - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - last_activity_in_room_map[room_id] = room_for_user.event_pos.stream - - # For fully-joined rooms, we find the latest activity at/before the - # `to_token`. 
- joined_room_positions = ( - await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( - [ - room_id - for room_id, room_for_user in sync_room_map.items() - if room_for_user.membership == Membership.JOIN - ], - to_token.room_key, - ) - ) - - last_activity_in_room_map.update(joined_room_positions) - - return sorted( - sync_room_map.values(), - # Sort by the last activity (stream_ordering) in the room - key=lambda room_info: last_activity_in_room_map[room_info.room_id], - # We want descending order - reverse=True, - ) - @trace async def get_current_state_ids_at( self, diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 599c74429e..f05f45f72c 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -13,7 +13,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Sequence, Set +from typing import TYPE_CHECKING, AbstractSet, Dict, Mapping, Optional, Sequence, Set from typing_extensions import assert_never @@ -30,6 +30,7 @@ from synapse.types import ( JsonMapping, MultiWriterStreamToken, SlidingSyncStreamToken, + StrCollection, StreamToken, ) from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult @@ -55,9 +56,9 @@ class SlidingSyncExtensionHandler: sync_config: SlidingSyncConfig, previous_connection_state: "PerConnectionState", new_connection_state: "MutablePerConnectionState", - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], + actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], + actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], to_token: StreamToken, from_token: Optional[SlidingSyncStreamToken], ) -> SlidingSyncResult.Extensions: @@ -144,10 +145,10 @@ class SlidingSyncExtensionHandler: def find_relevant_room_ids_for_extension( self, - requested_lists: Optional[List[str]], - requested_room_ids: Optional[List[str]], - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], + requested_lists: Optional[StrCollection], + requested_room_ids: Optional[StrCollection], + actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], + actual_room_ids: AbstractSet[str], ) -> Set[str]: """ Handle the reserved `lists`/`rooms` keys for extensions. 
Extensions should only
@@ -343,7 +344,7 @@ class SlidingSyncExtensionHandler:
     async def get_account_data_extension_response(
         self,
         sync_config: SlidingSyncConfig,
-        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
         actual_room_ids: Set[str],
         account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
         to_token: StreamToken,
@@ -436,9 +437,9 @@
         sync_config: SlidingSyncConfig,
         previous_connection_state: "PerConnectionState",
         new_connection_state: "MutablePerConnectionState",
-        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
         actual_room_ids: Set[str],
-        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
         receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
         to_token: StreamToken,
         from_token: Optional[SlidingSyncStreamToken],
@@ -598,9 +599,9 @@
     async def get_typing_extension_response(
         self,
         sync_config: SlidingSyncConfig,
-        actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList],
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
         actual_room_ids: Set[str],
-        actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult],
+        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
         typing_request: SlidingSyncConfig.Extensions.TypingExtension,
         to_token: StreamToken,
         from_token: Optional[SlidingSyncStreamToken],
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
new file mode 100644
index 0000000000..4718e8092b
--- /dev/null
+++ b/synapse/handlers/sliding_sync/room_lists.py
@@ -0,0 +1,1353 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+#
+
+
+import enum
+import logging
+from itertools import chain
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    Optional,
+    Set,
+    Union,
+)
+
+import attr
+from immutabledict import immutabledict
+from typing_extensions import assert_never
+
+from synapse.api.constants import (
+    AccountDataTypes,
+    EventContentFields,
+    EventTypes,
+    Membership,
+)
+from synapse.events import StrippedStateEvent
+from synapse.events.utils import parse_stripped_state_event
+from synapse.handlers.sliding_sync.types import (
+    HaveSentRoomFlag,
+    PerConnectionState,
+    RoomSyncConfig,
+)
+from synapse.logging.opentracing import start_active_span, trace
+from synapse.storage.databases.main.state import (
+    ROOM_UNKNOWN_SENTINEL,
+    Sentinel as StateSentinel,
+)
+from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
+from synapse.types import (
+    MutableStateMap,
+    PersistedEventPosition,
+    RoomStreamToken,
+    StateMap,
+    StrCollection,
+    StreamKeyType,
+    StreamToken,
+    UserID,
+)
+from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+from synapse.types.state import StateFilter
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class SlidingSyncInterestedRooms:
+    """The set of rooms and metadata a client is interested in based on their
+    sliding sync request.
+
+    Returned by `compute_interested_rooms`.
+
+    Attributes:
+        lists: A mapping from list name to the list result for the response
+        relevant_room_map: A map from rooms that match the sync request to
+            their room sync config.
+        relevant_rooms_to_send_map: Subset of `relevant_room_map` that
+            includes the rooms that *may* have relevant updates. Rooms not
+            in this map will definitely not have room updates (though
+            extensions may have updates in these rooms).
+        all_rooms: The set of room IDs of all rooms that could appear in any
+            list, including rooms outside the list ranges.
+        room_membership_for_user_map: Room membership for the user, keyed by
+            room ID, at the time of the `to_token`.
+    """
+
+    lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
+    relevant_room_map: Mapping[str, RoomSyncConfig]
+    relevant_rooms_to_send_map: Mapping[str, RoomSyncConfig]
+    all_rooms: Set[str]
+    room_membership_for_user_map: Mapping[str, "_RoomMembershipForUser"]
+
+
+class Sentinel(enum.Enum):
+    # defining a sentinel in this way allows mypy to correctly handle the
+    # type of a dictionary lookup and subsequent type narrowing.
+    UNSET_SENTINEL = object()
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _RoomMembershipForUser:
+    """
+    Attributes:
+        room_id: The room ID of the membership event
+        event_id: The event ID of the membership event
+        event_pos: The stream position of the membership event
+        membership: The membership state of the user in the room
+        sender: The person who sent the membership event
+        newly_joined: Whether the user newly joined the room during the given token
+            range and is still joined to the room at the end of this range.
+        newly_left: Whether the user newly left (or was kicked from) the room during
+            the given token range and is still "leave" at the end of this range.
+        is_dm: Whether this user considers this room as a direct-message (DM) room
+    """
+
+    room_id: str
+    # Optional because state resets can affect room membership without a corresponding event.
+    event_id: Optional[str]
+    # Even during a state reset which removes the user from the room, we expect this to
+    # be set because `current_state_delta_stream` will note the position at which the
+    # reset happened.
+    event_pos: PersistedEventPosition
+    # Even during a state reset which removes the user from the room, we expect this to
+    # be set to `LEAVE` because we can make that assumption based on the situation (see
+    # `get_current_state_delta_membership_changes_for_user(...)`)
+    membership: str
+    # Optional because state resets can affect room membership without a corresponding event.
+    sender: Optional[str]
+    newly_joined: bool
+    newly_left: bool
+    is_dm: bool
+
+    def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser":
+        return attr.evolve(self, **kwds)
+
+
+def filter_membership_for_sync(
+    *, user_id: str, room_membership_for_user: _RoomMembershipForUser
+) -> bool:
+    """
+    Returns True if the membership event should be included in the sync response,
+    otherwise False.
+
+    Args:
+        user_id: The user ID that the membership applies to
+        room_membership_for_user: Membership information for the user in the room
+    """
+
+    membership = room_membership_for_user.membership
+    sender = room_membership_for_user.sender
+    newly_left = room_membership_for_user.newly_left
+
+    # We want to allow everything except rooms the user has left unless `newly_left`
+    # because we want everything that's *still* relevant to the user. We include
+    # `newly_left` rooms because the last event that the user should see is their own
+    # leave event.
+    #
+    # A leave != kick. This logic includes kicks (leave events where the sender is not
+    # the same user).
+    #
+    # When `sender=None`, it means that a state reset happened that removed the user
+    # from the room without a corresponding leave event. We can just remove the rooms
+    # since they are no longer relevant to the user but will still appear if they are
+    # `newly_left`.
+    return (
+        # Anything except leave events
+        membership != Membership.LEAVE
+        # Unless...
+        or newly_left
+        # Allow kicks
+        or (membership == Membership.LEAVE and sender not in (user_id, None))
+    )
+
+
+class SlidingSyncRoomLists:
+    """Handles calculating the room lists from sliding sync requests"""
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
+        self.is_mine_id = hs.is_mine_id
+
+    async def compute_interested_rooms(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> SlidingSyncInterestedRooms:
+        """Fetch the set of rooms that match the request"""
+
+        room_membership_for_user_map = (
+            await self.get_room_membership_for_user_at_to_token(
+                sync_config.user, to_token, from_token
+            )
+        )
+
+        # Assemble sliding window lists
+        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
+        # Keep track of the rooms that we can display and need to fetch more info about
+        relevant_room_map: Dict[str, RoomSyncConfig] = {}
+        # The set of room IDs of all rooms that could appear in any list. These
+        # include rooms that are outside the list ranges.
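To make the `filter_membership_for_sync` predicate above concrete, here is a minimal, illustrative sketch of its decision table. The `FakeMembership` dataclass and the string constants are invented stand-ins for Synapse's attrs class and `Membership` constants, so the snippet runs on its own:

    from dataclasses import dataclass
    from typing import Optional

    LEAVE, JOIN = "leave", "join"  # stand-ins for Membership.LEAVE/JOIN

    @dataclass
    class FakeMembership:  # hypothetical stand-in for _RoomMembershipForUser
        membership: str
        sender: Optional[str]
        newly_left: bool

    def keep(user_id: str, m: FakeMembership) -> bool:
        # Same predicate as above: keep everything except plain leaves, but
        # keep `newly_left` rooms and kicks (a leave sent by someone other
        # than the user; `sender=None` means a state reset).
        return (
            m.membership != LEAVE
            or m.newly_left
            or (m.membership == LEAVE and m.sender not in (user_id, None))
        )

    me = "@me:example.org"
    assert keep(me, FakeMembership(JOIN, me, False))       # joined rooms stay
    assert not keep(me, FakeMembership(LEAVE, me, False))  # old leaves are dropped
    assert keep(me, FakeMembership(LEAVE, me, True))       # ...unless newly_left
    assert keep(me, FakeMembership(LEAVE, "@mod:example.org", False))  # kicks stay
    assert not keep(me, FakeMembership(LEAVE, None, False))  # state reset dropped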
+ all_rooms: Set[str] = set() + + if sync_config.lists: + with start_active_span("assemble_sliding_window_lists"): + sync_room_map = await self.filter_rooms_relevant_for_sync( + user=sync_config.user, + room_membership_for_user_map=room_membership_for_user_map, + ) + + for list_key, list_config in sync_config.lists.items(): + # Apply filters + filtered_sync_room_map = sync_room_map + if list_config.filters is not None: + filtered_sync_room_map = await self.filter_rooms( + sync_config.user, + sync_room_map, + list_config.filters, + to_token, + ) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_room_map = ( + await self.store.is_partial_state_room_batched( + filtered_sync_room_map.keys() + ) + ) + + # Since creating the `RoomSyncConfig` takes some work, let's just do it + # once and make a copy whenever we need it. + room_sync_config = RoomSyncConfig.from_room_config(list_config) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + filtered_sync_room_map = { + room_id: room + for room_id, room in filtered_sync_room_map.items() + if not partial_state_room_map.get(room_id) + } + + all_rooms.update(filtered_sync_room_map) + + # Sort the list + sorted_room_info = await self.sort_rooms( + filtered_sync_room_map, to_token + ) + + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + if list_config.ranges: + for range in list_config.ranges: + room_ids_in_list: List[str] = [] + + # We're going to loop through the sorted list of rooms starting + # at the range start index and keep adding rooms until we fill + # up the range or run out of rooms. + # + # Both sides of range are inclusive so we `+ 1` + max_num_rooms = range[1] - range[0] + 1 + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + + if len(room_ids_in_list) >= max_num_rooms: + break + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going + # to display and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get( + room_id + ) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + else: + # Make a copy so if we modify it later, it doesn't + # affect all references. + relevant_room_map[room_id] = ( + room_sync_config.deep_copy() + ) + + room_ids_in_list.append(room_id) + + ops.append( + SlidingSyncResult.SlidingWindowList.Operation( + op=OperationType.SYNC, + range=range, + room_ids=room_ids_in_list, + ) + ) + + lists[list_key] = SlidingSyncResult.SlidingWindowList( + count=len(sorted_room_info), + ops=ops, + ) + + if sync_config.room_subscriptions: + with start_active_span("assemble_room_subscriptions"): + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). 
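The sliding-window ranges handled above are inclusive on both ends, which is where the `+ 1` comes from. A condensed, self-contained equivalent of that windowing loop (room IDs made up for illustration):

    sorted_room_ids = ["!a:x", "!b:x", "!c:x", "!d:x", "!e:x"]

    def rooms_for_range(sorted_ids, rng):
        # Both sides of the range are inclusive, hence the `+ 1`.
        max_num_rooms = rng[1] - rng[0] + 1
        return sorted_ids[rng[0] :][:max_num_rooms]

    assert rooms_for_range(sorted_room_ids, (0, 2)) == ["!a:x", "!b:x", "!c:x"]
    # A range past the end of the list simply runs out of rooms.
    assert rooms_for_range(sorted_room_ids, (3, 9)) == ["!d:x", "!e:x"]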
+ partial_state_room_map = await self.store.is_partial_state_room_batched( + sync_config.room_subscriptions.keys() + ) + + for ( + room_id, + room_subscription, + ) in sync_config.room_subscriptions.items(): + room_membership_for_user_at_to_token = ( + await self.check_room_subscription_allowed_for_user( + room_id=room_id, + room_membership_for_user_map=room_membership_for_user_map, + to_token=to_token, + ) + ) + + # Skip this room if the user isn't allowed to see it + if not room_membership_for_user_at_to_token: + continue + + all_rooms.add(room_id) + + room_membership_for_user_map[room_id] = ( + room_membership_for_user_at_to_token + ) + + # Take the superset of the `RoomSyncConfig` for each room. + room_sync_config = RoomSyncConfig.from_room_config( + room_subscription + ) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + if partial_state_room_map.get(room_id): + continue + + all_rooms.add(room_id) + + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + else: + relevant_room_map[room_id] = room_sync_config + + # Filtered subset of `relevant_room_map` for rooms that may have updates + # (in the event stream) + relevant_rooms_to_send_map: Dict[str, RoomSyncConfig] = relevant_room_map + if relevant_room_map: + with start_active_span("filter_relevant_rooms_to_send"): + if from_token: + rooms_should_send = set() + + # First we check if there are rooms that match a list/room + # subscription and have updates we need to send (i.e. either because + # we haven't sent the room down, or we have but there are missing + # updates). + for room_id, room_config in relevant_room_map.items(): + prev_room_sync_config = ( + previous_connection_state.room_configs.get(room_id) + ) + if prev_room_sync_config is not None: + # Always include rooms whose timeline limit has increased. + # (see the "XXX: Odd behavior" described below) + if ( + prev_room_sync_config.timeline_limit + < room_config.timeline_limit + ): + rooms_should_send.add(room_id) + continue + + status = previous_connection_state.rooms.have_sent_room(room_id) + if ( + # The room was never sent down before so the client needs to know + # about it regardless of any updates. + status.status == HaveSentRoomFlag.NEVER + # `PREVIOUSLY` literally means the "room was sent down before *AND* + # there are updates we haven't sent down" so we already know this + # room has updates. + or status.status == HaveSentRoomFlag.PREVIOUSLY + ): + rooms_should_send.add(room_id) + elif status.status == HaveSentRoomFlag.LIVE: + # We know that we've sent all updates up until `from_token`, + # so we just need to check if there have been updates since + # then. + pass + else: + assert_never(status.status) + + # We only need to check for new events since any state changes + # will also come down as new events. 
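The three `HaveSentRoomFlag` states above drive whether a room must be resent on this connection. A condensed, illustrative paraphrase of that decision (the enum and helper here are stand-ins, not Synapse's own types):

    from enum import Enum, auto

    class Flag(Enum):  # stand-in for HaveSentRoomFlag
        NEVER = auto()       # never sent down this connection before
        PREVIOUSLY = auto()  # sent before, with updates we haven't sent
        LIVE = auto()        # fully up to date as of `from_token`

    def must_resend(flag: Flag, timeline_limit_grew: bool) -> bool:
        # A grown timeline limit always forces the room to be sent again;
        # otherwise NEVER/PREVIOUSLY rooms are resent unconditionally, while
        # LIVE rooms rely on the event-stream check that follows.
        return timeline_limit_grew or flag in (Flag.NEVER, Flag.PREVIOUSLY)

    assert must_resend(Flag.LIVE, timeline_limit_grew=True)
    assert must_resend(Flag.PREVIOUSLY, timeline_limit_grew=False)
    assert not must_resend(Flag.LIVE, timeline_limit_grew=False)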
+                    rooms_that_have_updates = (
+                        self.store.get_rooms_that_might_have_updates(
+                            relevant_room_map.keys(), from_token.room_key
+                        )
+                    )
+                    rooms_should_send.update(rooms_that_have_updates)
+                    relevant_rooms_to_send_map = {
+                        room_id: room_sync_config
+                        for room_id, room_sync_config in relevant_room_map.items()
+                        if room_id in rooms_should_send
+                    }
+
+        return SlidingSyncInterestedRooms(
+            lists=lists,
+            relevant_room_map=relevant_room_map,
+            relevant_rooms_to_send_map=relevant_rooms_to_send_map,
+            all_rooms=all_rooms,
+            room_membership_for_user_map=room_membership_for_user_map,
+        )
+
+    @trace
+    async def get_room_membership_for_user_at_to_token(
+        self,
+        user: UserID,
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> Dict[str, _RoomMembershipForUser]:
+        """
+        Fetch room IDs that the user has had membership in (the full room list including
+        long-lost left rooms that will be filtered, sorted, and sliced).
+
+        We're looking for rooms where the user has had any sort of membership in the
+        token range (> `from_token` and <= `to_token`).
+
+        In order for bans/kicks to not show up, you need to `/forget` those rooms. This
+        doesn't modify the event itself though and only adds the `forgotten` flag to the
+        `room_memberships` table in Synapse. There isn't a way to tell when a room was
+        forgotten at the moment so we can't factor it into the token range.
+
+        Args:
+            user: User to fetch rooms for
+            to_token: The token to fetch rooms up to.
+            from_token: The point in the stream to sync from.
+
+        Returns:
+            A dictionary of room IDs that the user has had membership in along with
+            membership information in that room at the time of `to_token`.
+        """
+        user_id = user.to_string()
+
+        # First grab a current snapshot of rooms for the user
+        # (also handles forgotten rooms)
+        room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
+            user_id=user_id,
+            # We want to fetch any kind of membership (joined and left rooms) in order
+            # to get the `event_pos` of the latest room membership event for the
+            # user.
+            membership_list=Membership.LIST,
+            excluded_rooms=self.rooms_to_exclude_globally,
+        )
+
+        # If the user has never joined any rooms before, we can just return an empty map
+        if not room_for_user_list:
+            return {}
+
+        # Our working list of rooms that can show up in the sync response
+        sync_room_id_set = {
+            # Note: The `room_for_user` we're assigning here will need to be fixed up
+            # (below) because they are potentially from the current snapshot time
+            # instead of from the time of the `to_token`.
+            room_for_user.room_id: _RoomMembershipForUser(
+                room_id=room_for_user.room_id,
+                event_id=room_for_user.event_id,
+                event_pos=room_for_user.event_pos,
+                membership=room_for_user.membership,
+                sender=room_for_user.sender,
+                # We will update these fields below to be accurate
+                newly_joined=False,
+                newly_left=False,
+                is_dm=False,
+            )
+            for room_for_user in room_for_user_list
+        }
+
+        # Get the `RoomStreamToken` that represents the spot we queried up to when we got
+        # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
+        #
+        # First, we need to get the max stream_ordering of each event persister instance
+        # that we queried events from.
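The loop that follows collapses those per-instance maxima into a `RoomStreamToken`-shaped snapshot position: the minimum stream position, plus an instance map covering only the writers that are ahead of it. Distilled into plain dictionaries (instance names and positions invented for illustration):

    instance_to_max = {"master": 10, "event_writer1": 15, "event_writer2": 10}

    min_stream_pos = min(instance_to_max.values())
    instance_map = {
        name: pos for name, pos in instance_to_max.items() if pos > min_stream_pos
    }

    assert min_stream_pos == 10
    # Only writers strictly ahead of the minimum need an explicit entry.
    assert instance_map == {"event_writer1": 15}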
+        instance_to_max_stream_ordering_map: Dict[str, int] = {}
+        for room_for_user in room_for_user_list:
+            instance_name = room_for_user.event_pos.instance_name
+            stream_ordering = room_for_user.event_pos.stream
+
+            current_instance_max_stream_ordering = (
+                instance_to_max_stream_ordering_map.get(instance_name)
+            )
+            if (
+                current_instance_max_stream_ordering is None
+                or stream_ordering > current_instance_max_stream_ordering
+            ):
+                instance_to_max_stream_ordering_map[instance_name] = stream_ordering
+
+        # Then assemble the `RoomStreamToken`
+        min_stream_pos = min(instance_to_max_stream_ordering_map.values())
+        membership_snapshot_token = RoomStreamToken(
+            # Minimum position in the `instance_map`
+            stream=min_stream_pos,
+            instance_map=immutabledict(
+                {
+                    instance_name: stream_pos
+                    for instance_name, stream_pos in instance_to_max_stream_ordering_map.items()
+                    if stream_pos > min_stream_pos
+                }
+            ),
+        )
+
+        # Since we fetched the user's room list at some point in time after the from/to
+        # tokens, we need to revert/rewind some membership changes to match the point in
+        # time of the `to_token`. In particular, we need to make these fixups:
+        #
+        # - 1a) Remove rooms that the user joined after the `to_token`
+        # - 1b) Add back rooms that the user left after the `to_token`
+        # - 1c) Update room membership events to the point in time of the `to_token`
+        # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`)
+        # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`)
+        # - 4) Figure out which rooms are DMs
+
+        # 1) Fetch membership changes that fall in the range from `to_token` up to
+        # `membership_snapshot_token`
+        #
+        # If our `to_token` is already the same or ahead of the latest room membership
+        # for the user, we don't need to do any "1)" fix-ups and can just straight-up
+        # use the room list from the snapshot as a base (nothing has changed)
+        current_state_delta_membership_changes_after_to_token = []
+        if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
+            current_state_delta_membership_changes_after_to_token = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
+                    user_id,
+                    from_key=to_token.room_key,
+                    to_key=membership_snapshot_token,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
+                )
+            )
+
+        # 1) Assemble a list of the first membership event after the `to_token` so we can
+        # step backward to the previous membership that would apply to the from/to
+        # range.
+        first_membership_change_by_room_id_after_to_token: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        for membership_change in current_state_delta_membership_changes_after_to_token:
+            # Only set if we haven't already set it
+            first_membership_change_by_room_id_after_to_token.setdefault(
+                membership_change.room_id, membership_change
+            )
+
+        # 1) Fixup
+        #
+        # Since we fetched a snapshot of the user's room list at some point in time after
+        # the from/to tokens, we need to revert/rewind some membership changes to match
+        # the point in time of the `to_token`.
+        for (
+            room_id,
+            first_membership_change_after_to_token,
+        ) in first_membership_change_by_room_id_after_to_token.items():
+            # 1a) Remove rooms that the user joined after the `to_token`
+            if first_membership_change_after_to_token.prev_event_id is None:
+                sync_room_id_set.pop(room_id, None)
+            # 1b) 1c) From the first membership event after the `to_token`, step backward to the
+            # previous membership that would apply to the from/to range.
+            else:
+                # We don't expect these fields to be `None` if we have a `prev_event_id`
+                # but we're being defensive since it's possible that the prev event was
+                # culled from the database.
+                if (
+                    first_membership_change_after_to_token.prev_event_pos is not None
+                    and first_membership_change_after_to_token.prev_membership
+                    is not None
+                ):
+                    sync_room_id_set[room_id] = _RoomMembershipForUser(
+                        room_id=room_id,
+                        event_id=first_membership_change_after_to_token.prev_event_id,
+                        event_pos=first_membership_change_after_to_token.prev_event_pos,
+                        membership=first_membership_change_after_to_token.prev_membership,
+                        sender=first_membership_change_after_to_token.prev_sender,
+                        # We will update these fields below to be accurate
+                        newly_joined=False,
+                        newly_left=False,
+                        is_dm=False,
+                    )
+                else:
+                    # If we can't find the previous membership event, we shouldn't
+                    # include the room in the sync response since we can't determine the
+                    # exact membership state and shouldn't rely on the current snapshot.
+                    sync_room_id_set.pop(room_id, None)
+
+        # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
+        current_state_delta_membership_changes_in_from_to_range = []
+        if from_token:
+            current_state_delta_membership_changes_in_from_to_range = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
+                    user_id,
+                    from_key=from_token.room_key,
+                    to_key=to_token.room_key,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
+                )
+            )
+
+        # 2) Assemble a list of the last membership events in some given ranges. Someone
+        # could have left and joined multiple times during the given range but we only
+        # care about the end result so we grab the last one.
+        last_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # We also want to assemble a list of the first membership events during the token
+        # range so we can step backward to the previous membership that would apply to
+        # before the token range to see if we have `newly_joined` the room.
+        first_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # Keep track of whether the room has a non-join event in the token range so we
+        # can later tell if it was a `newly_joined` room. If the last membership event
+        # in the token range is a join and there is also some non-join in the range, we
+        # know they `newly_joined`.
+        has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {}
+        for (
+            membership_change
+        ) in current_state_delta_membership_changes_in_from_to_range:
+            room_id = membership_change.room_id
+
+            last_membership_change_by_room_id_in_from_to_range[room_id] = (
+                membership_change
+            )
+            # Only set if we haven't already set it
+            first_membership_change_by_room_id_in_from_to_range.setdefault(
+                room_id, membership_change
+            )
+
+            if membership_change.membership != Membership.JOIN:
+                has_non_join_event_by_room_id_in_from_to_range[room_id] = True
+
+        # 2) Fixup
+        #
+        # 3) We also want to assemble a list of possibly newly joined rooms. Someone
+        # could have left and joined multiple times during the given range but we only
+        # care about whether they are joined at the end of the token range so we are
+        # working with the last membership event in the token range.
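The `newly_joined` fixup applied in the loop below boils down to a small decision table over three inputs. A runnable paraphrase (the helper name and constant are illustrative, not Synapse's own):

    from typing import Optional

    JOIN = "join"  # stand-in for Membership.JOIN

    def is_newly_joined(
        last_membership_in_range: str,
        has_non_join_in_range: bool,
        membership_before_range: Optional[str],  # None: no prev_event_id
    ) -> bool:
        # Only rooms whose *last* in-range membership is a join can qualify.
        if last_membership_in_range != JOIN:
            return False
        # Left and re-joined within the token range.
        if has_non_join_in_range:
            return True
        # First-ever membership happened inside the range.
        if membership_before_range is None:
            return True
        # Otherwise, newly joined only if not already joined before the range.
        return membership_before_range != JOIN

    assert is_newly_joined(JOIN, False, None)
    assert is_newly_joined(JOIN, True, JOIN)
    assert is_newly_joined(JOIN, False, "leave")
    assert not is_newly_joined(JOIN, False, JOIN)  # joined before and throughout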
+        possibly_newly_joined_room_ids = set()
+        for (
+            last_membership_change_in_from_to_range
+        ) in last_membership_change_by_room_id_in_from_to_range.values():
+            room_id = last_membership_change_in_from_to_range.room_id
+
+            # 3)
+            if last_membership_change_in_from_to_range.membership == Membership.JOIN:
+                possibly_newly_joined_room_ids.add(room_id)
+
+            # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`).
+            if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
+                # 2) Mark this room as `newly_left`
+
+                # If we're seeing a membership change here, we should expect to already
+                # have it in our snapshot but if a state reset happens, it wouldn't have
+                # shown up in our snapshot but appear as a change here.
+                existing_sync_entry = sync_room_id_set.get(room_id)
+                if existing_sync_entry is not None:
+                    # Normal expected case
+                    sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace(
+                        newly_left=True
+                    )
+                else:
+                    # State reset!
+                    logger.warning(
+                        "State reset detected for room_id %s with %s who is no longer in the room",
+                        room_id,
+                        user_id,
+                    )
+                    # Even though a state reset happened which removed the person from
+                    # the room, we still add it to the list so the user knows they left
+                    # the room. Downstream code can check for a state reset by looking
+                    # for `event_id=None and membership is not None`.
+                    sync_room_id_set[room_id] = _RoomMembershipForUser(
+                        room_id=room_id,
+                        event_id=last_membership_change_in_from_to_range.event_id,
+                        event_pos=last_membership_change_in_from_to_range.event_pos,
+                        membership=last_membership_change_in_from_to_range.membership,
+                        sender=last_membership_change_in_from_to_range.sender,
+                        newly_joined=False,
+                        newly_left=True,
+                        is_dm=False,
+                    )
+
+        # 3) Figure out `newly_joined`
+        for room_id in possibly_newly_joined_room_ids:
+            has_non_join_in_from_to_range = (
+                has_non_join_event_by_room_id_in_from_to_range.get(room_id, False)
+            )
+            # If the last membership event in the token range is a join and there is
+            # also some non-join in the range, we know they `newly_joined`.
+            if has_non_join_in_from_to_range:
+                # We found a `newly_joined` room (we left and joined within the token range)
+                sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace(
+                    newly_joined=True
+                )
+            else:
+                prev_event_id = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_event_id
+                prev_membership = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_membership
+
+                if prev_event_id is None:
+                    # We found a `newly_joined` room (we are joining the room for the
+                    # first time within the token range)
+                    sync_room_id_set[room_id] = sync_room_id_set[
+                        room_id
+                    ].copy_and_replace(newly_joined=True)
+                # Last resort, we need to step back to the previous membership event
+                # just before the token range to see if we're joined then or not.
+                elif prev_membership != Membership.JOIN:
+                    # We found a `newly_joined` room (we left before the token range
+                    # and joined within the token range)
+                    sync_room_id_set[room_id] = sync_room_id_set[
+                        room_id
+                    ].copy_and_replace(newly_joined=True)
+
+        # 4) Figure out which rooms the user considers to be direct-message (DM) rooms
+        #
+        # We're using global account data (`m.direct`) instead of checking for
+        # `is_direct` on membership events because that property only appears for
+        # the invitee membership event (doesn't show up for the inviter).
+ # + # We're unable to take `to_token` into account for global account data since + # we only keep track of the latest account data for the user. + dm_map = await self.store.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.DIRECT + ) + + # Flatten out the map. Account data is set by the client so it needs to be + # scrutinized. + dm_room_id_set = set() + if isinstance(dm_map, dict): + for room_ids in dm_map.values(): + # Account data should be a list of room IDs. Ignore anything else + if isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + dm_room_id_set.add(room_id) + + # 4) Fixup + for room_id in sync_room_id_set: + sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( + is_dm=room_id in dm_room_id_set + ) + + return sync_room_id_set + + @trace + async def filter_rooms_relevant_for_sync( + self, + user: UserID, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + ) -> Dict[str, _RoomMembershipForUser]: + """ + Filter room IDs that should/can be listed for this user in the sync response (the + full room list that will be further filtered, sorted, and sliced). + + We're looking for rooms where the user has the following state in the token + range (> `from_token` and <= `to_token`): + + - `invite`, `join`, `knock`, `ban` membership events + - Kicks (`leave` membership events where `sender` is different from the + `user_id`/`state_key`) + - `newly_left` (rooms that were left during the given token range) + - In order for bans/kicks to not show up in sync, you need to `/forget` those + rooms. This doesn't modify the event itself though and only adds the + `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way + to tell when a room was forgotten at the moment so we can't factor it into the + from/to range. + + Args: + user: User that is syncing + room_membership_for_user_map: Room membership for the user + + Returns: + A dictionary of room IDs that should be listed in the sync response along + with membership information in that room at the time of `to_token`. + """ + user_id = user.to_string() + + # Filter rooms to only what we're interested to sync with + filtered_sync_room_map = { + room_id: room_membership_for_user + for room_id, room_membership_for_user in room_membership_for_user_map.items() + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_membership_for_user, + ) + } + + return filtered_sync_room_map + + async def check_room_subscription_allowed_for_user( + self, + room_id: str, + room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + to_token: StreamToken, + ) -> Optional[_RoomMembershipForUser]: + """ + Check whether the user is allowed to see the room based on whether they have + ever had membership in the room or if the room is `world_readable`. + + Similar to `check_user_in_room_or_world_readable(...)` + + Args: + room_id: Room to check + room_membership_for_user_map: Room membership for the user at the time of + the `to_token` (<= `to_token`). + to_token: The token to fetch rooms up to. + + Returns: + The room membership for the user if they are allowed to subscribe to the + room else `None`. + """ + + # We can first check if they are already allowed to see the room based + # on our previous work to assemble the `room_membership_for_user_map`. + # + # If they have had any membership in the room over time (up to the `to_token`), + # let them subscribe and see what they can. 
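Returning for a moment to the `m.direct` flattening above: account data is client-supplied, so every level of the structure is type-checked before use. A standalone illustration with deliberately malformed entries (the data here is made up):

    dm_map = {
        "@alice:example.org": ["!dm1:example.org", 42],  # 42 is junk, skipped
        "@bob:example.org": "!not-a-list:example.org",   # wrong type, skipped
    }

    dm_room_id_set = set()
    if isinstance(dm_map, dict):
        for room_ids in dm_map.values():
            # Account data should be a list of room IDs. Ignore anything else.
            if isinstance(room_ids, list):
                for room_id in room_ids:
                    if isinstance(room_id, str):
                        dm_room_id_set.add(room_id)

    assert dm_room_id_set == {"!dm1:example.org"}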
+ existing_membership_for_user = room_membership_for_user_map.get(room_id) + if existing_membership_for_user is not None: + return existing_membership_for_user + + # TODO: Handle `world_readable` rooms + return None + + # If the room is `world_readable`, it doesn't matter whether they can join, + # everyone can see the room. + # not_in_room_membership_for_user = _RoomMembershipForUser( + # room_id=room_id, + # event_id=None, + # event_pos=None, + # membership=None, + # sender=None, + # newly_joined=False, + # newly_left=False, + # is_dm=False, + # ) + # room_state = await self.get_current_state_at( + # room_id=room_id, + # room_membership_for_user_at_to_token=not_in_room_membership_for_user, + # state_filter=StateFilter.from_types( + # [(EventTypes.RoomHistoryVisibility, "")] + # ), + # to_token=to_token, + # ) + + # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) + # if ( + # visibility_event is not None + # and visibility_event.content.get("history_visibility") + # == HistoryVisibility.WORLD_READABLE + # ): + # return not_in_room_membership_for_user + + # return None + + @trace + async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( + self, + room_ids: StrCollection, + sync_room_map: Dict[str, _RoomMembershipForUser], + ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: + """ + Fetch stripped state for a list of room IDs. Stripped state is only + applicable to invite/knock rooms. Other rooms will have `None` as their + stripped state. + + For invite rooms, we pull from `unsigned.invite_room_state`. + For knock rooms, we pull from `unsigned.knock_room_state`. + + Args: + room_ids: Room IDs to fetch stripped state for + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + + Returns: + Mapping from room_id to mapping of (type, state_key) to stripped state + event. + """ + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + + # Fetch what we haven't before + room_ids_to_fetch = [ + room_id + for room_id in room_ids + if room_id not in room_id_to_stripped_state_map + ] + + # Gather a list of event IDs we can grab stripped state from + invite_or_knock_event_ids: List[str] = [] + for room_id in room_ids_to_fetch: + if sync_room_map[room_id].membership in ( + Membership.INVITE, + Membership.KNOCK, + ): + event_id = sync_room_map[room_id].event_id + # If this is an invite/knock then there should be an event_id + assert event_id is not None + invite_or_knock_event_ids.append(event_id) + else: + room_id_to_stripped_state_map[room_id] = None + + invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids) + for invite_or_knock_event in invite_or_knock_events.values(): + room_id = invite_or_knock_event.room_id + membership = invite_or_knock_event.membership + + raw_stripped_state_events = None + if membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + else: + raise AssertionError( + f"Unexpected membership {membership} (this is a problem with Synapse itself)" + ) + + stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None + # Scrutinize unsigned things. 
`raw_stripped_state_events` should be a list
+            # of stripped events
+            if raw_stripped_state_events is not None:
+                stripped_state_map = {}
+                if isinstance(raw_stripped_state_events, list):
+                    for raw_stripped_event in raw_stripped_state_events:
+                        stripped_state_event = parse_stripped_state_event(
+                            raw_stripped_event
+                        )
+                        if stripped_state_event is not None:
+                            stripped_state_map[
+                                (
+                                    stripped_state_event.type,
+                                    stripped_state_event.state_key,
+                                )
+                            ] = stripped_state_event
+
+            room_id_to_stripped_state_map[room_id] = stripped_state_map
+
+        return room_id_to_stripped_state_map
+
+    @trace
+    async def _bulk_get_partial_current_state_content_for_rooms(
+        self,
+        content_type: Literal[
+            # `content.type` from `EventTypes.Create`
+            "room_type",
+            # `content.algorithm` from `EventTypes.RoomEncryption`
+            "room_encryption",
+        ],
+        room_ids: Set[str],
+        sync_room_map: Dict[str, _RoomMembershipForUser],
+        to_token: StreamToken,
+        room_id_to_stripped_state_map: Dict[
+            str, Optional[StateMap[StrippedStateEvent]]
+        ],
+    ) -> Mapping[str, Union[Optional[str], StateSentinel]]:
+        """
+        Get the given state event content for a list of rooms. First we check the
+        current state of the room, then fall back to stripped state if available, then
+        historical state.
+
+        Args:
+            content_type: Which content to grab
+            room_ids: Room IDs to fetch the given content field for.
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            to_token: We filter based on the state of the room at this token
+            room_id_to_stripped_state_map: This does not need to be filled in before
+                calling this function. Mapping from room_id to mapping of (type, state_key)
+                to stripped state event. Modified in place when we fetch new rooms so we can
+                save work next time this function is called.
+
+        Returns:
+            A mapping from room ID to the state event content if the room has
+            the given state event (event_type, ""), otherwise `None`. Rooms unknown to
+            this server will return `ROOM_UNKNOWN_SENTINEL`.
+        """
+        room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {}
+
+        # As a bulk shortcut, use the current state if the server is participating in the
+        # room (meaning we have current state). Ideally, for leave/ban rooms, we would
+        # want the state at the time of the membership instead of current state to not
+        # leak anything but we consider the create/encryption stripped state events to
+        # not be a secret given they are often set at the start of the room and they are
+        # normally handed out on invite/knock.
+        #
+        # Be mindful to only use this for non-sensitive details. For example, even
+        # though the room name/avatar/topic are also stripped state, they seem a lot
+        # more sensitive to leak the current state value of.
+        #
+        # Since this function is cached, we need to make a mutable copy via
+        # `dict(...)`.
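Before the tiered lookup below, a condensed sketch of the cascade may help. The helper callables and the `MISSING` sentinel are hypothetical; the real implementation batches each tier across many rooms and memoizes stripped state in `room_id_to_stripped_state_map`:

```python
from typing import Awaitable, Callable, Optional

MISSING = object()  # sentinel meaning "this tier had no answer"

async def get_content_field(
    room_id: str,
    current_state: Callable[[str], Awaitable[object]],
    stripped_state: Callable[[str], Awaitable[object]],
    historical_state: Callable[[str], Awaitable[object]],
) -> Optional[object]:
    """Resolve one state content field through three tiers of fallbacks."""
    # Tier 1: current state, available when this server participates in the room.
    value = await current_state(room_id)
    if value is not MISSING:
        return value
    # Tier 2: stripped state from the invite/knock event's `unsigned` section.
    value = await stripped_state(room_id)
    if value is not MISSING:
        return value
    # Tier 3: historical state at the user's membership event.
    value = await historical_state(room_id)
    return None if value is MISSING else value
```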
+        event_type = ""
+        event_content_field = ""
+        if content_type == "room_type":
+            event_type = EventTypes.Create
+            event_content_field = EventContentFields.ROOM_TYPE
+            room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids))
+        elif content_type == "room_encryption":
+            event_type = EventTypes.RoomEncryption
+            event_content_field = EventContentFields.ENCRYPTION_ALGORITHM
+            room_id_to_content = dict(
+                await self.store.bulk_get_room_encryption(room_ids)
+            )
+        else:
+            assert_never(content_type)
+
+        room_ids_with_results = [
+            room_id
+            for room_id, content_field in room_id_to_content.items()
+            if content_field is not ROOM_UNKNOWN_SENTINEL
+        ]
+
+        # We might not have current room state for remote invite/knocks if we are
+        # the first person on our server to see the room. The best we can do is look
+        # in the optional stripped state from the invite/knock event.
+        room_ids_without_results = room_ids.difference(
+            chain(
+                room_ids_with_results,
+                [
+                    room_id
+                    for room_id, stripped_state_map in room_id_to_stripped_state_map.items()
+                    if stripped_state_map is not None
+                ],
+            )
+        )
+        room_id_to_stripped_state_map.update(
+            await self._bulk_get_stripped_state_for_rooms_from_sync_room_map(
+                room_ids_without_results, sync_room_map
+            )
+        )
+
+        # Update our `room_id_to_content` map based on the stripped state
+        # (applies to invite/knock rooms)
+        room_ids_without_stripped_state: Set[str] = set()
+        for room_id in room_ids_without_results:
+            stripped_state_map = room_id_to_stripped_state_map.get(
+                room_id, Sentinel.UNSET_SENTINEL
+            )
+            assert stripped_state_map is not Sentinel.UNSET_SENTINEL, (
+                f"Stripped state left unset for room {room_id}. "
+                + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` "
+                + "with that room_id. (this is a problem with Synapse itself)"
+            )
+
+            # If there is some stripped state, we assume the remote server passed *all*
+            # of the potential stripped state events for the room.
+            if stripped_state_map is not None:
+                create_stripped_event = stripped_state_map.get((EventTypes.Create, ""))
+                stripped_event = stripped_state_map.get((event_type, ""))
+                # Sanity check that we at least have the create event
+                if create_stripped_event is not None:
+                    if stripped_event is not None:
+                        room_id_to_content[room_id] = stripped_event.content.get(
+                            event_content_field
+                        )
+                    else:
+                        # Didn't see the state event we're looking for in the stripped
+                        # state so we can assume the relevant content field is `None`.
+                        room_id_to_content[room_id] = None
+            else:
+                room_ids_without_stripped_state.add(room_id)
+
+        # Last resort: we might not have current room state for rooms that the
+        # server has left (no one local is in the room) but we can look at the
+        # historical state.
+        #
+        # Update our `room_id_to_content` map based on the state at the time of
+        # the membership event.
+        for room_id in room_ids_without_stripped_state:
+            # TODO: It would be nice to look this up in a bulk way (this is
+            # currently N+1 queries)
+            #
+            # TODO: `get_state_at(...)` doesn't take into account the "current state".
+            room_state = await self.storage_controllers.state.get_state_at(
+                room_id=room_id,
+                stream_position=to_token.copy_and_replace(
+                    StreamKeyType.ROOM,
+                    sync_room_map[room_id].event_pos.to_room_stream_token(),
+                ),
+                state_filter=StateFilter.from_types(
+                    [
+                        (EventTypes.Create, ""),
+                        (event_type, ""),
+                    ]
+                ),
+                # Partially-stated rooms should have all state events except for
+                # remote membership events so we don't need to wait at all because
+                # we only want the create event and some non-member event.
+                await_full_state=False,
+            )
+            # We can use the create event as a canary to tell whether the server has
+            # seen the room before
+            create_event = room_state.get((EventTypes.Create, ""))
+            state_event = room_state.get((event_type, ""))
+
+            if create_event is None:
+                # Skip for unknown rooms
+                continue
+
+            if state_event is not None:
+                room_id_to_content[room_id] = state_event.content.get(
+                    event_content_field
+                )
+            else:
+                # Didn't see the state event we're looking for in the historical
+                # state so we can assume the relevant content field is `None`.
+                room_id_to_content[room_id] = None
+
+        return room_id_to_content
+
+    @trace
+    async def filter_rooms(
+        self,
+        user: UserID,
+        sync_room_map: Dict[str, _RoomMembershipForUser],
+        filters: SlidingSyncConfig.SlidingSyncList.Filters,
+        to_token: StreamToken,
+    ) -> Dict[str, _RoomMembershipForUser]:
+        """
+        Filter rooms based on the sync request.
+
+        Args:
+            user: User to filter rooms for
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            filters: Filters to apply
+            to_token: We filter based on the state of the room at this token
+
+        Returns:
+            A filtered dictionary of room IDs along with membership information in the
+            room at the time of `to_token`.
+ """ + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + + filtered_room_id_set = set(sync_room_map.keys()) + + # Filter for Direct-Message (DM) rooms + if filters.is_dm is not None: + with start_active_span("filters.is_dm"): + if filters.is_dm: + # Only DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if sync_room_map[room_id].is_dm + } + else: + # Only non-DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if not sync_room_map[room_id].is_dm + } + + if filters.spaces is not None: + with start_active_span("filters.spaces"): + raise NotImplementedError() + + # Filter for encrypted rooms + if filters.is_encrypted is not None: + with start_active_span("filters.is_encrypted"): + room_id_to_encryption = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_encryption", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) + ) + + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + encryption = room_id_to_encryption.get( + room_id, ROOM_UNKNOWN_SENTINEL + ) + + # Just remove rooms if we can't determine their encryption status + if encryption is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue + + # If we're looking for encrypted rooms, filter out rooms that are not + # encrypted and vice versa + is_encrypted = encryption is not None + if (filters.is_encrypted and not is_encrypted) or ( + not filters.is_encrypted and is_encrypted + ): + filtered_room_id_set.remove(room_id) + + # Filter for rooms that the user has been invited to + if filters.is_invite is not None: + with start_active_span("filters.is_invite"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_for_user = sync_room_map[room_id] + # If we're looking for invite rooms, filter out rooms that the user is + # not invited to and vice versa + if ( + filters.is_invite + and room_for_user.membership != Membership.INVITE + ) or ( + not filters.is_invite + and room_for_user.membership == Membership.INVITE + ): + filtered_room_id_set.remove(room_id) + + # Filter by room type (space vs room, etc). A room must match one of the types + # provided in the list. `None` is a valid type for rooms which do not have a + # room type. 
+ if filters.room_types is not None or filters.not_room_types is not None: + with start_active_span("filters.room_types"): + room_id_to_type = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_type", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) + ) + + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) + + # Just remove rooms if we can't determine their type + if room_type is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue + + if ( + filters.room_types is not None + and room_type not in filters.room_types + ): + filtered_room_id_set.remove(room_id) + + if ( + filters.not_room_types is not None + and room_type in filters.not_room_types + ): + filtered_room_id_set.remove(room_id) + + if filters.room_name_like is not None: + with start_active_span("filters.room_name_like"): + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. + # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) + raise NotImplementedError() + + if filters.tags is not None or filters.not_tags is not None: + with start_active_span("filters.tags"): + raise NotImplementedError() + + # Assemble a new sync room map but only with the `filtered_room_id_set` + return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} + + @trace + async def sort_rooms( + self, + sync_room_map: Dict[str, _RoomMembershipForUser], + to_token: StreamToken, + ) -> List[_RoomMembershipForUser]: + """ + Sort by `stream_ordering` of the last event that the user should see in the + room. `stream_ordering` is unique so we get a stable sort. + + Args: + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + to_token: We sort based on the events in the room at this token (<= `to_token`) + + Returns: + A sorted list of room IDs by `stream_ordering` along with membership information. + """ + + # Assemble a map of room ID to the `stream_ordering` of the last activity that the + # user should see in the room (<= `to_token`) + last_activity_in_room_map: Dict[str, int] = {} + + for room_id, room_for_user in sync_room_map.items(): + if room_for_user.membership != Membership.JOIN: + # If the user has left/been invited/knocked/been banned from a + # room, they shouldn't see anything past that point. + # + # FIXME: It's possible that people should see beyond this point + # in invited/knocked cases if for example the room has + # `invite`/`world_readable` history visibility, see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + + # For fully-joined rooms, we find the latest activity at/before the + # `to_token`. 
+ joined_room_positions = ( + await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( + [ + room_id + for room_id, room_for_user in sync_room_map.items() + if room_for_user.membership == Membership.JOIN + ], + to_token.room_key, + ) + ) + + last_activity_in_room_map.update(joined_room_positions) + + return sorted( + sync_room_map.values(), + # Sort by the last activity (stream_ordering) in the room + key=lambda room_info: last_activity_in_room_map[room_info.room_id], + # We want descending order + reverse=True, + ) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 21b90b0674..22c85e497a 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -21,7 +21,7 @@ import itertools import logging from collections import defaultdict -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState from synapse.api.errors import Codes, StoreError, SynapseError @@ -975,7 +975,7 @@ class SlidingSyncRestServlet(RestServlet): return response def encode_lists( - self, lists: Dict[str, SlidingSyncResult.SlidingWindowList] + self, lists: Mapping[str, SlidingSyncResult.SlidingWindowList] ) -> JsonDict: def encode_operation( operation: SlidingSyncResult.SlidingWindowList.Operation, diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 580342d98a..b303bb1f96 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -409,7 +409,7 @@ class SlidingSyncResult: ) next_pos: SlidingSyncStreamToken - lists: Dict[str, SlidingWindowList] + lists: Mapping[str, SlidingWindowList] rooms: Dict[str, RoomResult] extensions: Extensions diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 96da47f3b9..2cf2f2982f 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -620,7 +620,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=now_token, to_token=now_token, @@ -647,7 +647,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -682,7 +682,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room_token, to_token=after_room_token, @@ -756,7 +756,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + 
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -828,7 +828,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_kick_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -921,7 +921,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(channel.code, 200, channel.result) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_forgets, to_token=before_room_forgets, @@ -951,7 +951,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room2_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room2_token, @@ -1001,7 +1001,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1041,7 +1041,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1088,7 +1088,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1152,7 +1152,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -1208,7 +1208,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), 
from_token=before_room1_token, to_token=after_room1_token, @@ -1263,7 +1263,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1322,7 +1322,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id2, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=None, to_token=after_room1_token, @@ -1404,7 +1404,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id4, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=from_token, to_token=to_token, @@ -1477,7 +1477,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1520,7 +1520,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1570,7 +1570,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1632,7 +1632,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1691,7 +1691,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1765,7 +1765,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) 
room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -1830,7 +1830,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_change1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_change1_token, @@ -1902,7 +1902,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room1_token, @@ -1984,7 +1984,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.leave(room_id1, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -2052,7 +2052,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -2088,7 +2088,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_more_changes_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_more_changes_token, @@ -2153,7 +2153,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room1_token = self.event_sources.get_current_token() room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, to_token=after_room1_token, @@ -2229,7 +2229,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.leave(room_id3, user1_id, tok=user1_tok) room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room3_token, to_token=after_room3_token, @@ -2365,7 +2365,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # The function under test room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_reset_token, to_token=after_reset_token, @@ -2579,7 
+2579,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa # The function under test room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_stuck_activity_token, to_token=stuck_activity_token, @@ -2669,14 +2669,14 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): Get the rooms the user should be syncing with """ room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, to_token=to_token, ) ) filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( + self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, ) @@ -3030,14 +3030,14 @@ class FilterRoomsTestCase(HomeserverTestCase): Get the rooms the user should be syncing with """ room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, to_token=to_token, ) ) filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( + self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, ) @@ -3196,7 +3196,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_dm=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3210,7 +3210,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_dm=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3252,7 +3252,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3266,7 +3266,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3316,7 +3316,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3330,7 +3330,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( 
UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3390,7 +3390,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3404,7 +3404,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3463,7 +3463,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3484,7 +3484,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3533,7 +3533,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3549,7 +3549,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3619,7 +3619,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3637,7 +3637,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3700,7 +3700,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3716,7 +3716,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_encrypted=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3760,7 +3760,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_invite=True` truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + 
self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3774,7 +3774,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try with `is_invite=False` falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3827,7 +3827,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -3839,7 +3839,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -3851,7 +3851,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding normal rooms and spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3865,7 +3865,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding an arbitrary room type filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3918,7 +3918,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding *NOT* normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]), @@ -3930,7 +3930,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding *NOT* spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3944,7 +3944,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding *NOT* normal rooms or spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3959,7 +3959,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Test how it behaves when we have both `room_types` and `not_room_types`. # `not_room_types` should win. filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -3975,7 +3975,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Test how it behaves when we have both `room_types` and `not_room_types`. # `not_room_types` should win. 
filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters( @@ -4025,7 +4025,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -4037,7 +4037,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -4094,7 +4094,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -4106,7 +4106,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -4152,7 +4152,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -4166,7 +4166,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -4228,7 +4228,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -4242,7 +4242,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -4305,7 +4305,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only normal rooms filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), @@ -4319,7 +4319,7 @@ class FilterRoomsTestCase(HomeserverTestCase): # Try finding only spaces filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( + self.sliding_sync_handler.room_lists.filter_rooms( 
UserID.from_string(user1_id), sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), @@ -4366,14 +4366,14 @@ class SortRoomsTestCase(HomeserverTestCase): Get the rooms the user should be syncing with """ room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, to_token=to_token, ) ) filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( + self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, ) @@ -4408,7 +4408,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) @@ -4489,7 +4489,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) @@ -4553,7 +4553,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) From 573c6d7e697bf39ad99dd81dd1b0d9c112f5e5ec Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Thu, 29 Aug 2024 09:25:10 +0200 Subject: [PATCH 162/332] Use `max_upload_size` as the limit when following the `Location` header (#17543) Otherwise we use the `expected_size` from the initial federation request, which might be far too low. ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Erik Johnston --- changelog.d/17543.bugfix | 1 + synapse/http/matrixfederationclient.py | 6 +- tests/http/test_matrixfederationclient.py | 78 +++++++++++++++++++++++ 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17543.bugfix diff --git a/changelog.d/17543.bugfix b/changelog.d/17543.bugfix new file mode 100644 index 0000000000..152b305e58 --- /dev/null +++ b/changelog.d/17543.bugfix @@ -0,0 +1 @@ +Fix authenticated media responses using a wrong limit when following redirects over federation. 
\ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 6fd75fd381..12c41c39e9 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -464,6 +464,8 @@ class MatrixFederationHttpClient: self.max_long_retries = hs.config.federation.max_long_retries self.max_short_retries = hs.config.federation.max_short_retries + self.max_download_size = hs.config.media.max_upload_size + self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) self._sleeper = AwakenableSleeper(self.reactor) @@ -1756,8 +1758,10 @@ class MatrixFederationHttpClient: request.destination, str_url, ) + # We don't know how large the response will be upfront, so limit it to + # the `max_upload_size` config value. length, headers, _, _ = await self._simple_http_client.get_file( - str_url, output_stream, expected_size + str_url, output_stream, self.max_download_size ) logger.info( diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index e2f033fdae..6827412373 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -17,6 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # +import io from typing import Any, Dict, Generator from unittest.mock import ANY, Mock, create_autospec @@ -32,7 +33,9 @@ from twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from synapse.api.errors import HttpResponseException, RequestSendFailed +from synapse.api.ratelimiting import Ratelimiter from synapse.config._base import ConfigError +from synapse.config.ratelimiting import RatelimitSettings from synapse.http.matrixfederationclient import ( ByteParser, MatrixFederationHttpClient, @@ -337,6 +340,81 @@ class FederationClientTests(HomeserverTestCase): r = self.successResultOf(d) self.assertEqual(r.code, 200) + def test_authed_media_redirect_response(self) -> None: + """ + Validate that, when following a `Location` redirect, the + maximum size is _not_ set to the initial response `Content-Length` and + the media file can be downloaded. 
+ """ + limiter = Ratelimiter( + store=self.hs.get_datastores().main, + clock=self.clock, + cfg=RatelimitSettings(key="", per_second=0.17, burst_count=1048576), + ) + + output_stream = io.BytesIO() + + d = defer.ensureDeferred( + self.cl.federation_get_file( + "testserv:8008", "path", output_stream, limiter, "127.0.0.1", 10000 + ) + ) + + self.pump() + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + + # complete the connection and wire it up to a fake transport + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # Deferred does not have a result + self.assertNoResult(d) + + redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: http://testserv:8008/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Length: %i\r\n" + b"Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a\r\n\r\n" + % (len(redirect_data)) + ) + protocol.dataReceived(redirect_data) + + # Still no result, not followed the redirect yet + self.assertNoResult(d) + + # Now send the response returned by the server at `Location` + clients = self.reactor.tcpClients + (host, port, factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # make sure the length is longer than the initial response + data = b"Hello world!" * 30 + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Length: %i\r\n" + b"Content-Type: text/plain\r\n" + b"\r\n" + b"%s\r\n" + b"\r\n" % (len(data), data) + ) + + # We should get a successful response + length, _, _ = self.successResultOf(d) + self.assertEqual(length, len(data)) + self.assertEqual(output_stream.getvalue(), data) + @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"]) def test_timeout_reading_body(self, method_name: str) -> None: """ From 8678516e79925c0be0fc922faebac28da6f0ae4c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 10:09:40 +0100 Subject: [PATCH 163/332] Sliding sync: Always send your own receipts down (#17617) When returning receipts in sliding sync for initial rooms we should always include our own receipts in the room (even if they don't match any timeline events). Reviewable commit-by-commit. --------- Co-authored-by: Eric Eastwood --- changelog.d/17617.misc | 1 + synapse/handlers/sliding_sync/extensions.py | 78 ++++-- synapse/storage/databases/main/receipts.py | 235 +++++++++++++----- .../sliding_sync/test_extension_receipts.py | 132 ++++++++++ 4 files changed, 359 insertions(+), 87 deletions(-) create mode 100644 changelog.d/17617.misc diff --git a/changelog.d/17617.misc b/changelog.d/17617.misc new file mode 100644 index 0000000000..ba05648965 --- /dev/null +++ b/changelog.d/17617.misc @@ -0,0 +1 @@ +Always return the user's own read receipts in sliding sync. 
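For orientation before the diff: the extension-handler change below merges two receipt sources per room (receipts for timeline events, plus the syncing user's own receipts) before flattening them into EDU content. A minimal sketch of that merge pattern, with assumed data shapes, rather than the actual Synapse code:

```python
import itertools
from typing import Dict, List

def combine_receipts(
    timeline_receipts: Dict[str, List[object]],
    user_receipts: Dict[str, List[object]],
) -> Dict[str, List[object]]:
    """Union the receipts from both sources, keyed by room ID."""
    return {
        room_id: list(
            itertools.chain(
                timeline_receipts.get(room_id, []),
                user_receipts.get(room_id, []),
            )
        )
        # Iterate over every room that appears in either source.
        for room_id in timeline_receipts.keys() | user_receipts.keys()
    }

# Usage: rooms present in only one source still appear in the result.
merged = combine_receipts({"!a:hs": ["r1"]}, {"!a:hs": ["r2"], "!b:hs": ["r3"]})
assert merged == {"!a:hs": ["r1", "r2"], "!b:hs": ["r3"]}
```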
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index f05f45f72c..a2d4f24f9c 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -12,12 +12,13 @@ # . # +import itertools import logging from typing import TYPE_CHECKING, AbstractSet, Dict, Mapping, Optional, Sequence, Set from typing_extensions import assert_never -from synapse.api.constants import AccountDataTypes +from synapse.api.constants import AccountDataTypes, EduTypes from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.sliding_sync.types import ( HaveSentRoomFlag, @@ -25,6 +26,7 @@ from synapse.handlers.sliding_sync.types import ( PerConnectionState, ) from synapse.logging.opentracing import trace +from synapse.storage.databases.main.receipts import ReceiptInRoom from synapse.types import ( DeviceListUpdates, JsonMapping, @@ -485,15 +487,21 @@ class SlidingSyncExtensionHandler: initial_rooms.add(room_id) continue - # If we're sending down the room from scratch again for some reason, we - # should always resend the receipts as well (regardless of if - # we've sent them down before). This is to mimic the behaviour - # of what happens on initial sync, where you get a chunk of - # timeline with all of the corresponding receipts for the events in the timeline. + # If we're sending down the room from scratch again for some + # reason, we should always resend the receipts as well + # (regardless of if we've sent them down before). This is to + # mimic the behaviour of what happens on initial sync, where you + # get a chunk of timeline with all of the corresponding receipts + # for the events in the timeline. + # + # We also resend down receipts when we "expand" the timeline, + # (see the "XXX: Odd behavior" in + # `synapse.handlers.sliding_sync`). room_result = actual_room_response_map.get(room_id) - if room_result is not None and room_result.initial: - initial_rooms.add(room_id) - continue + if room_result is not None: + if room_result.initial or room_result.unstable_expanded_timeline: + initial_rooms.add(room_id) + continue room_status = previous_connection_state.receipts.have_sent_room(room_id) if room_status.status == HaveSentRoomFlag.LIVE: @@ -536,21 +544,49 @@ class SlidingSyncExtensionHandler: ) fetched_receipts.extend(previously_receipts) - # For rooms we haven't previously sent down, we could send all receipts - # from that room but we only want to include receipts for events - # in the timeline to avoid bloating and blowing up the sync response - # as the number of users in the room increases. (this behavior is part of the spec) - initial_rooms_and_event_ids = [ - (room_id, event.event_id) - for room_id in initial_rooms - if room_id in actual_room_response_map - for event in actual_room_response_map[room_id].timeline_events - ] - if initial_rooms_and_event_ids: + if initial_rooms: + # We also always send down receipts for the current user. + user_receipts = ( + await self.store.get_linearized_receipts_for_user_in_rooms( + user_id=sync_config.user.to_string(), + room_ids=initial_rooms, + to_key=to_token.receipt_key, + ) + ) + + # For rooms we haven't previously sent down, we could send all receipts + # from that room but we only want to include receipts for events + # in the timeline to avoid bloating and blowing up the sync response + # as the number of users in the room increases. 
(this behavior is part of the spec) + initial_rooms_and_event_ids = [ + (room_id, event.event_id) + for room_id in initial_rooms + if room_id in actual_room_response_map + for event in actual_room_response_map[room_id].timeline_events + ] initial_receipts = await self.store.get_linearized_receipts_for_events( room_and_event_ids=initial_rooms_and_event_ids, ) - fetched_receipts.extend(initial_receipts) + + # Combine the receipts for a room and add them to + # `fetched_receipts` + for room_id in initial_receipts.keys() | user_receipts.keys(): + receipt_content = ReceiptInRoom.merge_to_content( + list( + itertools.chain( + initial_receipts.get(room_id, []), + user_receipts.get(room_id, []), + ) + ) + ) + + fetched_receipts.append( + { + "room_id": room_id, + "type": EduTypes.RECEIPT, + "content": receipt_content, + } + ) fetched_receipts = ReceiptEventSource.filter_out_private_receipts( fetched_receipts, sync_config.user.to_string() diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 0a20f5db4c..bf10743574 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -30,10 +30,12 @@ from typing import ( Mapping, Optional, Sequence, + Set, Tuple, cast, ) +import attr from immutabledict import immutabledict from synapse.api.constants import EduTypes @@ -65,6 +67,57 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +@attr.s(auto_attribs=True, slots=True, frozen=True) +class ReceiptInRoom: + receipt_type: str + user_id: str + event_id: str + thread_id: Optional[str] + data: JsonMapping + + @staticmethod + def merge_to_content(receipts: Collection["ReceiptInRoom"]) -> JsonMapping: + """Merge the given set of receipts (in a room) into the receipt + content format. + + Returns: + A mapping of the combined receipts: event ID -> receipt type -> user + ID -> receipt data. + """ + # MSC4102: always replace threaded receipts with unthreaded ones if + # there is a clash. This means we will drop some receipts, but MSC4102 + # is designed to drop semantically meaningless receipts, so this is + # okay. Previously, we would drop meaningful data! + # + # We do this by finding the unthreaded receipts, and then filtering out + # matching threaded receipts. + + # Set of (user_id, event_id) + unthreaded_receipts: Set[Tuple[str, str]] = { + (receipt.user_id, receipt.event_id) + for receipt in receipts + if receipt.thread_id is None + } + + # event_id -> receipt_type -> user_id -> receipt data + content: Dict[str, Dict[str, Dict[str, JsonMapping]]] = {} + for receipt in receipts: + data = receipt.data + if receipt.thread_id is not None: + if (receipt.user_id, receipt.event_id) in unthreaded_receipts: + # Ignore threaded receipts if we have an unthreaded one. 
+ continue + + data = dict(data) + data["thread_id"] = receipt.thread_id + + content.setdefault(receipt.event_id, {}).setdefault( + receipt.receipt_type, {} + )[receipt.user_id] = data + + return content + + class ReceiptsWorkerStore(SQLBaseStore): def __init__( self, @@ -401,7 +454,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def f( txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + ) -> Mapping[str, Sequence[ReceiptInRoom]]: if from_key: sql = """ SELECT stream_id, instance_name, room_id, receipt_type, @@ -431,50 +484,46 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args)) - return [ - (room_id, receipt_type, user_id, event_id, thread_id, data) - for stream_id, instance_name, room_id, receipt_type, user_id, event_id, thread_id, data in txn - if MultiWriterStreamToken.is_stream_position_in_range( + results: Dict[str, List[ReceiptInRoom]] = {} + for ( + stream_id, + instance_name, + room_id, + receipt_type, + user_id, + event_id, + thread_id, + data, + ) in txn: + if not MultiWriterStreamToken.is_stream_position_in_range( from_key, to_key, instance_name, stream_id + ): + continue + + results.setdefault(room_id, []).append( + ReceiptInRoom( + receipt_type=receipt_type, + user_id=user_id, + event_id=event_id, + thread_id=thread_id, + data=db_to_json(data), + ) ) - ] + + return results txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f ) - results: JsonDict = {} - for room_id, receipt_type, user_id, event_id, thread_id, data in txn_results: - # We want a single event per room, since we want to batch the - # receipts by room, event and type. - room_event = results.setdefault( - room_id, - {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, - ) - - # The content is of the form: - # {"$foo:bar": { "read": { "@user:host": }, .. }, .. } - event_entry = room_event["content"].setdefault(event_id, {}) - receipt_type_dict = event_entry.setdefault(receipt_type, {}) - - # MSC4102: always replace threaded receipts with unthreaded ones if there is a clash. - # Specifically: - # - if there is no existing receipt, great, set the data. - # - if there is an existing receipt, is it threaded (thread_id present)? - # YES: replace if this receipt has no thread id. NO: do not replace. - # This means we will drop some receipts, but MSC4102 is designed to drop semantically - # meaningless receipts, so this is okay. Previously, we would drop meaningful data! - receipt_data = db_to_json(data) - if user_id in receipt_type_dict: # existing receipt - # is the existing receipt threaded and we are currently processing an unthreaded one? 
- if "thread_id" in receipt_type_dict[user_id] and not thread_id: - receipt_type_dict[user_id] = ( - receipt_data # replace with unthreaded one - ) - else: # receipt does not exist, just set it - receipt_type_dict[user_id] = receipt_data - if thread_id: - receipt_type_dict[user_id]["thread_id"] = thread_id + results: JsonDict = { + room_id: { + "room_id": room_id, + "type": EduTypes.RECEIPT, + "content": ReceiptInRoom.merge_to_content(receipts), + } + for room_id, receipts in txn_results.items() + } results = { room_id: [results[room_id]] if room_id in results else [] @@ -485,7 +534,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_linearized_receipts_for_events( self, room_and_event_ids: Collection[Tuple[str, str]], - ) -> Sequence[JsonMapping]: + ) -> Mapping[str, Sequence[ReceiptInRoom]]: """Get all receipts for the given set of events. Arguments: @@ -495,6 +544,8 @@ class ReceiptsWorkerStore(SQLBaseStore): Returns: A list of receipts, one per room. """ + if not room_and_event_ids: + return {} def get_linearized_receipts_for_events_txn( txn: LoggingTransaction, @@ -514,8 +565,8 @@ class ReceiptsWorkerStore(SQLBaseStore): return txn.fetchall() - # room_id -> event_id -> receipt_type -> user_id -> receipt data - room_to_content: Dict[str, Dict[str, Dict[str, Dict[str, JsonMapping]]]] = {} + # room_id -> receipts + room_to_receipts: Dict[str, List[ReceiptInRoom]] = {} for batch in batch_iter(room_and_event_ids, 1000): batch_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_events", @@ -531,33 +582,17 @@ class ReceiptsWorkerStore(SQLBaseStore): thread_id, data, ) in batch_results: - content = room_to_content.setdefault(room_id, {}) - user_receipts = content.setdefault(event_id, {}).setdefault( - receipt_type, {} + room_to_receipts.setdefault(room_id, []).append( + ReceiptInRoom( + receipt_type=receipt_type, + user_id=user_id, + event_id=event_id, + thread_id=thread_id, + data=db_to_json(data), + ) ) - receipt_data = db_to_json(data) - if thread_id is not None: - receipt_data["thread_id"] = thread_id - - # MSC4102: always replace threaded receipts with unthreaded ones - # if there is a clash. Specifically: - # - if there is no existing receipt, great, set the data. - # - if there is an existing receipt, is it threaded (thread_id - # present)? YES: replace if this receipt has no thread id. - # NO: do not replace. This means we will drop some receipts, but - # MSC4102 is designed to drop semantically meaningless receipts, - # so this is okay. Previously, we would drop meaningful data! - if user_id in user_receipts: - if "thread_id" in user_receipts[user_id] and not thread_id: - user_receipts[user_id] = receipt_data - else: - user_receipts[user_id] = receipt_data - - return [ - {"type": EduTypes.RECEIPT, "room_id": room_id, "content": content} - for room_id, content in room_to_content.items() - ] + return room_to_receipts @cached( num_args=2, @@ -630,6 +665,74 @@ class ReceiptsWorkerStore(SQLBaseStore): return results + async def get_linearized_receipts_for_user_in_rooms( + self, user_id: str, room_ids: StrCollection, to_key: MultiWriterStreamToken + ) -> Mapping[str, Sequence[ReceiptInRoom]]: + """Fetch all receipts for the user in the given room. + + Returns: + A dict from room ID to receipts in the room. 
+        """
+
+        def get_linearized_receipts_for_user_in_rooms_txn(
+            txn: LoggingTransaction,
+            batch_room_ids: StrCollection,
+        ) -> List[Tuple[str, str, str, str, Optional[str], str]]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", batch_room_ids
+            )
+
+            sql = f"""
+                SELECT instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data
+                FROM receipts_linearized
+                WHERE {clause} AND user_id = ? AND stream_id <= ?
+            """
+
+            args.append(user_id)
+            args.append(to_key.get_max_stream_pos())
+
+            txn.execute(sql, args)
+
+            return [
+                (room_id, receipt_type, user_id, event_id, thread_id, data)
+                for instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data in txn
+                if MultiWriterStreamToken.is_stream_position_in_range(
+                    low=None,
+                    high=to_key,
+                    instance_name=instance_name,
+                    pos=stream_id,
+                )
+            ]
+
+        # room_id -> receipts
+        room_to_receipts: Dict[str, List[ReceiptInRoom]] = {}
+        for batch in batch_iter(room_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "get_linearized_receipts_for_user_in_rooms",
+                get_linearized_receipts_for_user_in_rooms_txn,
+                batch,
+            )
+
+            for (
+                room_id,
+                receipt_type,
+                user_id,
+                event_id,
+                thread_id,
+                data,
+            ) in batch_results:
+                room_to_receipts.setdefault(room_id, []).append(
+                    ReceiptInRoom(
+                        receipt_type=receipt_type,
+                        user_id=user_id,
+                        event_id=event_id,
+                        thread_id=thread_id,
+                        data=db_to_json(data),
+                    )
+                )
+
+        return room_to_receipts
+
     async def get_rooms_with_receipts_between(
         self,
         room_ids: StrCollection,
diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py
index 39c51b367c..e842349ed2 100644
--- a/tests/rest/client/sliding_sync/test_extension_receipts.py
+++ b/tests/rest/client/sliding_sync/test_extension_receipts.py
@@ -782,3 +782,135 @@ class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
             {user2_id},
             exact=True,
         )
+
+    def test_return_own_read_receipts(self) -> None:
+        """Test that we always send the user's own read receipts in initial
+        rooms, even if the receipts don't match events in the timeline.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send a message and read receipts into room1
+        event_response = self.helper.send(room_id1, body="new event", tok=user2_tok)
+        room1_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user1_tok)
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok)
+
+        # Now send a message so the above message is not in the timeline.
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the latest message.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should get our own receipt in room1, even though it's not in the
+        # timeline limit.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # We should only see our read receipt, not the other user's.
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+
+    def test_read_receipts_expanded_timeline(self) -> None:
+        """Test that we get read receipts when we expand the timeline limit (`unstable_expanded_timeline`)."""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send a message and read receipt into room1
+        event_response = self.helper.send(room_id1, body="new event", tok=user2_tok)
+        room1_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok)
+
+        # Now send a message so the above message is not in the timeline.
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the latest message.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # We shouldn't see user2's read receipt, as it's not in the timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+        # Now do another request with a room subscription with an increased timeline limit
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 2,
+            }
+        }
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # Assert that we did actually get an expanded timeline
+        room_response = response_body["rooms"][room_id1]
+        self.assertNotIn("initial", room_response)
+        self.assertEqual(room_response["unstable_expanded_timeline"], True)
+
+        # We should now see user2's read receipt, as it's in the expanded timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # We should only see user2's read receipt here, not our own.
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )

From 9eed8cd8789d929488b15de36373249239acece5 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Thu, 29 Aug 2024 12:33:14 +0200
Subject: [PATCH 164/332] fix listener docs - admin api only on main process
 (#17590)

---
 changelog.d/17590.doc                            | 1 +
 docs/usage/configuration/config_documentation.md | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/17590.doc

diff --git a/changelog.d/17590.doc b/changelog.d/17590.doc
new file mode 100644
index 0000000000..eced3d96cb
--- /dev/null
+++ b/changelog.d/17590.doc
@@ -0,0 +1 @@
+Clarify that the admin api resource is only loaded on the main process and not workers.
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 567bbf88d2..24d9249f1a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -509,7 +509,8 @@ Unix socket support (_Added in Synapse 1.89.0_): Valid resource names are: -* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`. +* `client`: the client-server API (/_matrix/client). Also implies `media` and `static`. + If configuring the main process, the Synapse Admin API (/_synapse/admin) is also implied. * `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more. @@ -1765,7 +1766,7 @@ rc_3pid_validation: This option sets ratelimiting how often invites can be sent in a room or to a specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`, -`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer` +`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer` defaults to `per_second: 0.3`, `burst_count: 10`. Client requests that invite user(s) when [creating a @@ -1966,7 +1967,7 @@ max_image_pixels: 35M --- ### `remote_media_download_burst_count` -Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests. +Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests. Example configuration: ```yaml From a8f29c991314b46145cb5634fb196215bdcbd364 Mon Sep 17 00:00:00 2001 From: meise Date: Thu, 29 Aug 2024 12:39:16 +0200 Subject: [PATCH 165/332] docs: fix typo in saml2_config example (#17594) --- changelog.d/17594.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17594.doc diff --git a/changelog.d/17594.doc b/changelog.d/17594.doc new file mode 100644 index 0000000000..95b0042005 --- /dev/null +++ b/changelog.d/17594.doc @@ -0,0 +1 @@ +Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 24d9249f1a..c18f03d321 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3303,8 +3303,8 @@ saml2_config: contact_person: - given_name: Bob sur_name: "the Sysadmin" - email_address": ["admin@example.com"] - contact_type": technical + email_address: ["admin@example.com"] + contact_type: technical saml_session_lifetime: 5m From b21134de3bc68242afb72248c0fd93dae675cbcf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 13:26:17 +0100 Subject: [PATCH 166/332] Fix starting non-media repos (#17626) Regressed in #17543. The `max_download_size` config is not available on workers that don't load the media repo. Besides, we should honour the max_size param that was passed into the function. --- changelog.d/17543.bugfix | 2 +- changelog.d/17626.bugfix | 1 + synapse/http/matrixfederationclient.py | 6 ++---- 3 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17626.bugfix diff --git a/changelog.d/17543.bugfix b/changelog.d/17543.bugfix index 152b305e58..1dbb2a2f45 100644 --- a/changelog.d/17543.bugfix +++ b/changelog.d/17543.bugfix @@ -1 +1 @@ -Fix authenticated media responses using a wrong limit when following redirects over federation. \ No newline at end of file +Fix authenticated media responses using a wrong limit when following redirects over federation. diff --git a/changelog.d/17626.bugfix b/changelog.d/17626.bugfix new file mode 100644 index 0000000000..1dbb2a2f45 --- /dev/null +++ b/changelog.d/17626.bugfix @@ -0,0 +1 @@ +Fix authenticated media responses using a wrong limit when following redirects over federation. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 12c41c39e9..ecbbb6cfc4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -464,8 +464,6 @@ class MatrixFederationHttpClient: self.max_long_retries = hs.config.federation.max_long_retries self.max_short_retries = hs.config.federation.max_short_retries - self.max_download_size = hs.config.media.max_upload_size - self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) self._sleeper = AwakenableSleeper(self.reactor) @@ -1759,9 +1757,9 @@ class MatrixFederationHttpClient: str_url, ) # We don't know how large the response will be upfront, so limit it to - # the `max_upload_size` config value. + # the `max_size` config value. length, headers, _, _ = await self._simple_http_client.get_file( - str_url, output_stream, self.max_download_size + str_url, output_stream, max_size ) logger.info( From 594cd5f9fd7b29cee22f878d4df5cb5818ba90e9 Mon Sep 17 00:00:00 2001 From: Gordan Trevis Date: Thu, 29 Aug 2024 16:34:29 +0200 Subject: [PATCH 167/332] Fix Internal Server Error for Non-Local Users in Room Actions (#17607) --- changelog.d/17607.bugfix | 1 + synapse/storage/databases/main/roommember.py | 8 +++---- tests/handlers/test_room_member.py | 22 +++++++++++++++++++- 3 files changed, 26 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17607.bugfix diff --git a/changelog.d/17607.bugfix b/changelog.d/17607.bugfix new file mode 100644 index 0000000000..74201135b6 --- /dev/null +++ b/changelog.d/17607.bugfix @@ -0,0 +1 @@ +Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. 
\ No newline at end of file diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 1d9f0f52e1..71baf57663 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -19,6 +19,7 @@ # # import logging +from http import HTTPStatus from typing import ( TYPE_CHECKING, AbstractSet, @@ -39,6 +40,7 @@ from typing import ( import attr from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import Codes, SynapseError from synapse.logging.opentracing import trace from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -631,10 +633,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ # Paranoia check. if not self.hs.is_mine_id(user_id): - raise Exception( - "Cannot call 'get_local_current_membership_for_user_in_room' on " - "non-local user %s" % (user_id,), - ) + message = f"Provided user_id {user_id} is a non-local user" + raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON) results = cast( Optional[Tuple[str, str]], diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 213a66ed1a..acb403cb2f 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -6,7 +6,7 @@ import synapse.rest.admin import synapse.rest.client.login import synapse.rest.client.room from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import LimitExceededError, SynapseError +from synapse.api.errors import Codes, LimitExceededError, SynapseError from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import FrozenEventV3 from synapse.federation.federation_client import SendJoinResult @@ -383,6 +383,26 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): """Tests that a user cannot not forgets a room that has not left.""" self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError) + def test_nonlocal_room_user_action(self) -> None: + """ + Test that non-local user ids cannot perform room actions through + this homeserver. + """ + alien_user_id = UserID.from_string("@cheeky_monkey:matrix.org") + bad_room_id = f"{self.room_id}+BAD_ID" + + exc = self.get_failure( + self.handler.update_membership( + create_requester(self.alice), + alien_user_id, + bad_room_id, + "unban", + ), + SynapseError, + ).value + + self.assertEqual(exc.errcode, Codes.BAD_JSON) + def test_rejoin_forgotten_by_user(self) -> None: """Test that a user that has forgotten a room can do a re-join. The room was not forgotten from the local server. From 1a6b718f8c88424440b43e8d0d4fac54faa191f1 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 29 Aug 2024 10:09:51 -0500 Subject: [PATCH 168/332] Sliding Sync: Pre-populate room data for quick filtering/sorting (#17512) Pre-populate room data for quick filtering/sorting in the Sliding Sync API Spawning from https://github.com/element-hq/synapse/pull/17450#discussion_r1697335578 This PR is acting as the Synapse version `N+1` step in the gradual migration being tracked by https://github.com/element-hq/synapse/issues/17623 Adding two new database tables: - `sliding_sync_joined_rooms`: A table for storing room meta data that the local server is still participating in. The info here can be shared across all `Membership.JOIN`. 
Keyed on `(room_id)` and updated when the relevant room current state changes or
a new event is sent in the room.
- `sliding_sync_membership_snapshots`: A table for storing a snapshot of room meta data at
  the time of the local user's membership. Keyed on `(room_id, user_id)` and only updated
  when a user's membership in a room changes.

Also adds background updates to populate these tables with all of the existing data.

We want the guarantee that if a row exists in the sliding sync tables, we are able to rely
on it (accurate data). And if a row doesn't exist, we use a fallback to get the same info
until the background updates fill in the rows or a new event comes in triggering it to be
fully inserted. This means we need a couple of extra things in place until we bump
`SCHEMA_COMPAT_VERSION` and run the foreground update in the `N+2` part of the gradual
migration. For context on why we can't rely on the tables without these things, see [1].

1. On start-up, block until we clear out any rows for the rooms that have had events since
   the max-`stream_ordering` of the `sliding_sync_joined_rooms` table (compare to the
   max-`stream_ordering` of the `events` table). For `sliding_sync_membership_snapshots`,
   we can compare to the max-`stream_ordering` of `local_current_membership` (see the SQL
   sketch below).
   - This accounts for when someone downgrades their Synapse version and then upgrades it
     again. This will ensure that we don't have any stale/out-of-date data in the
     `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables since any new
     events sent in rooms would have also needed to be written to the sliding sync tables.
     For example, a new event needs to bump `event_stream_ordering` in the
     `sliding_sync_joined_rooms` table, as does some state in the room changing (like the
     room name). Another example is someone's membership changing in a room, which affects
     `sliding_sync_membership_snapshots`.
1. Add another background update that will catch up with any rows that were just deleted
   from the sliding sync tables (based on the activity in
   `events`/`local_current_membership`). The rooms that need recalculating are added to
   the `sliding_sync_joined_rooms_to_recalculate` table.
1. Making sure rows are fully inserted. Instead of partially inserting, we need to check
   if the row already exists and fully insert all data if not.

All of this extra functionality can be removed once the `SCHEMA_COMPAT_VERSION` is bumped
with support for the new sliding sync tables so people can no longer downgrade (the `N+2`
part of the gradual migration).
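To make the start-up check in step (1) concrete, the query shape is roughly the following
(a sketch only: the actual implementation works off the max `stream_ordering` values
rather than running this exact SQL):

```sql
-- Sketch: find rooms whose `sliding_sync_joined_rooms` row may be stale because
-- events were persisted after the row was last updated (e.g. the server ran on a
-- downgraded version for a while). Such rows get cleared out and queued for
-- recalculation via `sliding_sync_joined_rooms_to_recalculate`.
SELECT e.room_id
FROM events AS e
JOIN sliding_sync_joined_rooms AS s USING (room_id)
GROUP BY e.room_id, s.event_stream_ordering
HAVING MAX(e.stream_ordering) > s.event_stream_ordering;
```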
[1] For `sliding_sync_joined_rooms`, since we partially insert rows as state comes in, we can't rely on the existence of the row for a given `room_id`. We can't even rely on looking at whether the background update has finished. There could still be partial rows from when someone reverted their Synapse version after the background update finished, had some state changes (or new rooms), then upgraded again and more state changes happen leaving a partial row. For `sliding_sync_membership_snapshots`, we insert items as a whole except for the `forgotten` column ~~so we can rely on rows existing and just need to always use a fallback for the `forgotten` data. We can't use the `forgotten` column in the table for the same reasons above about `sliding_sync_joined_rooms`.~~ We could have an out-of-date membership from when someone reverted their Synapse version. (same problems as outlined for `sliding_sync_joined_rooms` above) Discussed in an [internal meeting](https://docs.google.com/document/d/1MnuvPkaCkT_wviSQZ6YKBjiWciCBFMd-7hxyCO-OCbQ/edit#bookmark=id.dz5x6ef4mxz7)
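As a rough illustration of that read-with-fallback shape (hypothetical helper and
dataclass names; the real fallback logic in this PR lives in the sliding sync handler and
differs in detail):

```python
from typing import Any, Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomMeta:
    # Hypothetical container for the per-room columns we care about here.
    room_name: Optional[str]
    room_type: Optional[str]
    is_encrypted: bool


async def get_room_meta(store: Any, room_id: str) -> RoomMeta:
    # Prefer the pre-computed row if it exists...
    row = await store.db_pool.simple_select_one(
        table="sliding_sync_joined_rooms",
        keyvalues={"room_id": room_id},
        retcols=("room_name", "room_type", "is_encrypted"),
        allow_none=True,
        desc="get_room_meta",
    )
    if row is not None:
        return RoomMeta(*row)

    # ...otherwise fall back to deriving the same values from
    # `current_state_events` until the background update (or a new event in
    # the room) fully inserts the row.
    return await derive_meta_from_current_state(store, room_id)


async def derive_meta_from_current_state(store: Any, room_id: str) -> RoomMeta:
    # Stand-in for the fallback path (reading the relevant state events out of
    # `current_state_events`); omitted for brevity.
    ...
```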
### TODO

- [x] Update `stream_ordering`/`bump_stamp`
- [x] Handle remote invites
- [x] Handle state resets
- [x] Consider adding `sender` so we can filter `LEAVE` memberships and distinguish from kicks.
    - [x] We should add it to be able to tell leaves from kicks
- [x] Consider adding `tombstone` state to help address https://github.com/element-hq/synapse/issues/17540
    - [x] We should add it as `tombstone_successor_room_id`
- [x] Consider adding `forgotten` status to avoid an extra lookup/table-join on `room_memberships`
    - [x] We should add it
- [x] Background update to fill in values for all joined rooms and non-join memberships
- [x] Clean up tables when a room is deleted
- [ ] Make sure tables are useful to our use case
    - First explored in https://github.com/element-hq/synapse/compare/erikj/ss_use_new_tables
    - Also explored in https://github.com/element-hq/synapse/commit/76b5a576eb363496315dfd39510cad7d02b0fc73
- [x] Plan for how we can use this with a fallback
    - See the plan discussed above in the main area of the issue description
    - Discussed in an [internal meeting](https://docs.google.com/document/d/1MnuvPkaCkT_wviSQZ6YKBjiWciCBFMd-7hxyCO-OCbQ/edit#bookmark=id.dz5x6ef4mxz7)
- [x] Plan for how we can rely on this new table without a fallback
    - Synapse version `N+1`: (this PR) Bump `SCHEMA_VERSION` to `87`. Add new tables and a
      background update to backfill all rows. Since these are new tables, we don't have to
      add any `NOT VALID` constraints and validate them when the background update
      completes. Read from the new tables with a fallback in cases where the rows aren't
      filled in yet.
    - Synapse version `N+2`: Bump `SCHEMA_VERSION` to `88` and bump
      `SCHEMA_COMPAT_VERSION` to `87` because we don't want people to downgrade and miss
      writes while they are on an older version. Add a foreground update to finish off the
      backfill so we can read from the new tables without the fallback. Application code
      can now rely on the new tables being populated.
    - Discussed in an [internal meeting](https://docs.google.com/document/d/1MnuvPkaCkT_wviSQZ6YKBjiWciCBFMd-7hxyCO-OCbQ/edit#bookmark=id.hh7shg4cxdhj)

### Dev notes

```
SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.storage.test_events.SlidingSyncPrePopulatedTablesTestCase

SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.storage.test_events.SlidingSyncPrePopulatedTablesTestCase
```

```
SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.handlers.test_sliding_sync.FilterRoomsTestCase
```

Reference:

- [Development docs on background updates and worked examples of gradual migrations](https://github.com/element-hq/synapse/blob/1dfa59b238cee0dc62163588cc9481896c288979/docs/development/database_schema.md#background-updates)
- A real example of a gradual migration: https://github.com/matrix-org/synapse/pull/15649#discussion_r1213779514
- Adding the `rooms.creator` field, which needed a background update to backfill data: https://github.com/matrix-org/synapse/pull/10697
- Adding `rooms.room_version`, which needed a background update to backfill data: https://github.com/matrix-org/synapse/pull/6729
- Adding `room_stats_state.room_type`, which needed a background update to backfill data: https://github.com/matrix-org/synapse/pull/13031
- Tables from MSC2716: `insertion_events`, `insertion_event_edges`, `insertion_event_extremities`, `batch_events`
- `current_state_events` updated in `synapse/storage/databases/main/events.py`

---

```
persist_event (adds to queue)
    _persist_event_batch
        _persist_events_and_state_updates (assigns `stream_ordering` to events)
            _persist_events_txn
                _store_event_txn
                    _update_metadata_tables_txn
                        _store_room_members_txn
                _update_current_state_txn
```

---

> Concatenated Indexes [...] (also known as multi-column, composite or combined index)
>
> [...] key consists of multiple columns.
>
> We can take advantage of the fact that the first index column is always usable for searching
>
> *-- https://use-the-index-luke.com/sql/where-clause/the-equals-operator/concatenated-keys*

---

Dealing with `portdb` (`synapse/_scripts/synapse_port_db.py`), https://github.com/element-hq/synapse/pull/17512#discussion_r1725998219

---
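Tying the concatenated-index note above to the new tables: `sliding_sync_membership_snapshots`
is keyed on `(room_id, user_id)`, so, assuming a standard b-tree index on that key (and the
`membership` column from the draft SQL below), lookups by `room_id` alone can still use the
index while lookups by `user_id` alone cannot:

```sql
-- Can use the (room_id, user_id) index: room_id is the leading key column.
SELECT user_id, membership
FROM sliding_sync_membership_snapshots
WHERE room_id = '!abc:example.com';

-- Cannot use that index efficiently: user_id is not the leading key column.
SELECT room_id, membership
FROM sliding_sync_membership_snapshots
WHERE user_id = '@alice:example.com';
```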
SQL queries:

Both of these are equivalent and work in SQLite and Postgres.

Option 1:
```sql
WITH data_table (room_id, user_id, membership_event_id, membership, event_stream_ordering, {", ".join(insert_keys)}) AS (
    VALUES (
        ?, ?, ?,
        (SELECT membership FROM room_memberships WHERE event_id = ?),
        (SELECT stream_ordering FROM events WHERE event_id = ?),
        {", ".join("?" for _ in insert_values)}
    )
)
INSERT INTO sliding_sync_non_join_memberships (room_id, user_id, membership_event_id, membership, event_stream_ordering, {", ".join(insert_keys)})
SELECT * FROM data_table
WHERE membership != ?
ON CONFLICT (room_id, user_id)
DO UPDATE SET
    membership_event_id = EXCLUDED.membership_event_id,
    membership = EXCLUDED.membership,
    event_stream_ordering = EXCLUDED.event_stream_ordering,
    {", ".join(f"{key} = EXCLUDED.{key}" for key in insert_keys)}
```

Option 2:
```sql
INSERT INTO sliding_sync_non_join_memberships (room_id, user_id, membership_event_id, membership, event_stream_ordering, {", ".join(insert_keys)})
SELECT
    column1 as room_id,
    column2 as user_id,
    column3 as membership_event_id,
    column4 as membership,
    column5 as event_stream_ordering,
    {", ".join("column" + str(i) for i in range(6, 6 + len(insert_keys)))}
FROM (
    VALUES (
        ?, ?, ?,
        (SELECT membership FROM room_memberships WHERE event_id = ?),
        (SELECT stream_ordering FROM events WHERE event_id = ?),
        {", ".join("?" for _ in insert_values)}
    )
) as v
WHERE membership != ?
ON CONFLICT (room_id, user_id)
DO UPDATE SET
    membership_event_id = EXCLUDED.membership_event_id,
    membership = EXCLUDED.membership,
    event_stream_ordering = EXCLUDED.event_stream_ordering,
    {", ".join(f"{key} = EXCLUDED.{key}" for key in insert_keys)}
```

If we don't need the `membership` condition, we could use:
```sql
INSERT INTO sliding_sync_non_join_memberships (room_id, membership_event_id, user_id, membership, event_stream_ordering, {", ".join(insert_keys)})
VALUES (
    ?, ?, ?,
    (SELECT membership FROM room_memberships WHERE event_id = ?),
    (SELECT stream_ordering FROM events WHERE event_id = ?),
    {", ".join("?" for _ in insert_values)}
)
ON CONFLICT (room_id, user_id)
DO UPDATE SET
    membership_event_id = EXCLUDED.membership_event_id,
    membership = EXCLUDED.membership,
    event_stream_ordering = EXCLUDED.event_stream_ordering,
    {", ".join(f"{key} = EXCLUDED.{key}" for key in insert_keys)}
```
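For reference, the `ON CONFLICT ... DO UPDATE` half of these statements is what Synapse's
`simple_upsert_txn` helper (touched in this PR) generates on databases with native upsert
support; the `WHERE membership != ?` filter on the inserted rows is the part with no
direct helper equivalent, which is presumably why hand-written SQL was explored. A rough
usage sketch with made-up values:

```python
# Sketch: upserting the state-derived columns for one room inside an event
# persistence transaction (assumes `self` is a store with a `db_pool`; the
# column values here are illustrative only).
self.db_pool.simple_upsert_txn(
    txn,
    table="sliding_sync_joined_rooms",
    keyvalues={"room_id": room_id},
    values={
        "room_name": room_name,
        "is_encrypted": is_encrypted,
    },
    insertion_values={
        # Only used when the row doesn't already exist.
        "event_stream_ordering": best_effort_stream_ordering,
    },
)
```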
### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Erik Johnston --- changelog.d/17512.misc | 1 + synapse/_scripts/synapse_port_db.py | 5 + synapse/api/constants.py | 2 + synapse/handlers/sliding_sync/__init__.py | 22 +- synapse/storage/controllers/persist_events.py | 9 +- synapse/storage/database.py | 29 +- synapse/storage/databases/main/events.py | 967 +++- .../databases/main/events_bg_updates.py | 1030 +++- .../storage/databases/main/events_worker.py | 8 + .../storage/databases/main/purge_events.py | 4 + synapse/storage/databases/main/roommember.py | 6 + .../storage/databases/main/state_deltas.py | 97 +- synapse/storage/databases/main/stream.py | 66 +- synapse/storage/schema/__init__.py | 6 +- .../delta/87/01_sliding_sync_memberships.sql | 169 + synapse/types/handlers/__init__.py | 13 + .../client/sliding_sync/test_rooms_meta.py | 58 +- tests/storage/test__base.py | 18 + tests/storage/test_events.py | 3 + tests/storage/test_roommember.py | 180 +- tests/storage/test_sliding_sync_tables.py | 4830 +++++++++++++++++ tests/unittest.py | 4 +- 22 files changed, 7413 insertions(+), 114 deletions(-) create mode 100644 changelog.d/17512.misc create mode 100644 synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql create mode 100644 tests/storage/test_sliding_sync_tables.py diff --git a/changelog.d/17512.misc b/changelog.d/17512.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17512.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. 
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 5c6db8118f..195c95d376 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -129,6 +129,11 @@ BOOLEAN_COLUMNS = { "remote_media_cache": ["authenticated"], "room_stats_state": ["is_federatable"], "rooms": ["is_public", "has_auth_chain_index"], + "sliding_sync_joined_rooms": ["is_encrypted"], + "sliding_sync_membership_snapshots": [ + "has_known_state", + "is_encrypted", + ], "users": ["shadow_banned", "approved", "locked", "suspended"], "un_partial_stated_event_stream": ["rejection_status_changed"], "users_who_share_rooms": ["share_private"], diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 7dcb1e01fd..8e3b404aed 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -245,6 +245,8 @@ class EventContentFields: # `m.room.encryption`` algorithm field ENCRYPTION_ALGORITHM: Final = "algorithm" + TOMBSTONE_SUCCESSOR_ROOM: Final = "replacement_room" + class EventUnsignedContentFields: """Fields found inside the 'unsigned' data on events""" diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index ccd464cd1c..c34ba83cd6 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -57,7 +57,11 @@ from synapse.types import ( StreamKeyType, StreamToken, ) -from synapse.types.handlers import SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers import ( + SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + SlidingSyncConfig, + SlidingSyncResult, +) from synapse.types.state import StateFilter from synapse.util.async_helpers import concurrently_execute from synapse.visibility import filter_events_for_client @@ -75,18 +79,6 @@ sync_processing_time = Histogram( ) -# The event types that clients should consider as new activity. 
-DEFAULT_BUMP_EVENT_TYPES = { - EventTypes.Create, - EventTypes.Message, - EventTypes.Encrypted, - EventTypes.Sticker, - EventTypes.CallInvite, - EventTypes.PollStart, - EventTypes.LiveLocationShareStart, -} - - class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @@ -986,7 +978,9 @@ class SlidingSyncHandler: # Figure out the last bump event in the room last_bump_event_result = ( await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, to_token.room_key, event_types=DEFAULT_BUMP_EVENT_TYPES + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, ) ) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index d0e015bf19..ac0919340b 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -502,8 +502,15 @@ class EventsPersistenceStorageController: """ state = await self._calculate_current_state(room_id) delta = await self._calculate_state_delta(room_id, state) + sliding_sync_table_changes = ( + await self.persist_events_store._calculate_sliding_sync_table_changes( + room_id, [], delta + ) + ) - await self.persist_events_store.update_current_state(room_id, delta) + await self.persist_events_store.update_current_state( + room_id, delta, sliding_sync_table_changes + ) async def _calculate_current_state(self, room_id: str) -> StateMap[str]: """Calculate the current state of a room, based on the forward extremities diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 569f618193..d666039120 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -35,6 +35,7 @@ from typing import ( Iterable, Iterator, List, + Mapping, Optional, Sequence, Tuple, @@ -1254,9 +1255,9 @@ class DatabasePool: self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, ) -> bool: """ @@ -1299,9 +1300,9 @@ class DatabasePool: self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, lock: bool = True, ) -> bool: @@ -1322,7 +1323,7 @@ class DatabasePool: if lock: # We need to lock the table :( - self.engine.lock_table(txn, table) + txn.database_engine.lock_table(txn, table) def _getwhere(key: str) -> str: # If the value we're passing in is None (aka NULL), we need to use @@ -1376,13 +1377,13 @@ class DatabasePool: # successfully inserted return True + @staticmethod def simple_upsert_txn_native_upsert( - self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, ) -> bool: """ @@ -1535,8 +1536,8 @@ class DatabasePool: self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False) + @staticmethod def simple_upsert_many_txn_native_upsert( - self, txn: LoggingTransaction, table: str, key_names: Collection[str], @@ -1966,8 +1967,8 @@ class DatabasePool: def 
simple_update_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - updatevalues: Dict[str, Any], + keyvalues: Mapping[str, Any], + updatevalues: Mapping[str, Any], ) -> int: """ Update rows in the given database table. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1f7acdb859..60c92e5804 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -32,6 +32,7 @@ from typing import ( Iterable, List, Optional, + Sequence, Set, Tuple, cast, @@ -39,19 +40,27 @@ from typing import ( import attr from prometheus_client import Counter +from typing_extensions import TypedDict import synapse.metrics -from synapse.api.constants import EventContentFields, EventTypes, RelationTypes +from synapse.api.constants import ( + EventContentFields, + EventTypes, + Membership, + RelationTypes, +) from synapse.api.errors import PartialStateConflictError from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase, relation_from_event +from synapse.events import EventBase, StrippedStateEvent, relation_from_event from synapse.events.snapshot import EventContext +from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import trace from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_tuple_in_list_sql_clause, ) from synapse.storage.databases.main.event_federation import EventFederationStore from synapse.storage.databases.main.events_worker import EventCacheEntry @@ -59,7 +68,15 @@ from synapse.storage.databases.main.search import SearchEntry from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator -from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id +from synapse.types import ( + JsonDict, + MutableStateMap, + StateMap, + StrCollection, + get_domain_from_id, +) +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.state import StateFilter from synapse.util import json_encoder from synapse.util.iterutils import batch_iter, sorted_topologically from synapse.util.stringutils import non_null_str_or_none @@ -78,6 +95,19 @@ event_counter = Counter( ["type", "origin_type", "origin_entity"], ) +# State event type/key pairs that we need to gather to fill in the +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. +SLIDING_SYNC_RELEVANT_STATE_SET = ( + # So we can fill in the `room_type` column + (EventTypes.Create, ""), + # So we can fill in the `is_encrypted` column + (EventTypes.RoomEncryption, ""), + # So we can fill in the `room_name` column + (EventTypes.Name, ""), + # So we can fill in the `tombstone_successor_room_id` column + (EventTypes.Tombstone, ""), +) + @attr.s(slots=True, auto_attribs=True) class DeltaState: @@ -99,6 +129,82 @@ class DeltaState: return not self.to_delete and not self.to_insert and not self.no_longer_in_room +# We want `total=False` because we want to allow values to be unset. +class SlidingSyncStateInsertValues(TypedDict, total=False): + """ + Insert values relevant for the `sliding_sync_joined_rooms` and + `sliding_sync_membership_snapshots` database tables. 
+ """ + + room_type: Optional[str] + is_encrypted: Optional[bool] + room_name: Optional[str] + tombstone_successor_room_id: Optional[str] + + +class SlidingSyncMembershipSnapshotSharedInsertValues( + SlidingSyncStateInsertValues, total=False +): + """ + Insert values for `sliding_sync_membership_snapshots` that we can share across + multiple memberships + """ + + has_known_state: Optional[bool] + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncMembershipInfo: + """ + Values unique to each membership + """ + + user_id: str + sender: str + membership_event_id: str + membership: str + membership_event_stream_ordering: int + membership_event_instance_name: str + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncTableChanges: + room_id: str + # `stream_ordering` of the most recent event being persisted in the room. This doesn't + # need to be perfect, we just need *some* answer that points to a real event in the + # room in case we are the first ones inserting into the `sliding_sync_joined_rooms` + # table because of the `NON NULL` constraint on `event_stream_ordering`. In reality, + # `_update_sliding_sync_tables_with_new_persisted_events_txn()` is run after + # `_update_current_state_txn()` whenever a new event is persisted to update it to the + # correct latest value. + # + # This should be *some* value that points to a real event in the room if we are + # still joined to the room and some state is changing (`to_insert` or `to_delete`). + joined_room_best_effort_most_recent_stream_ordering: Optional[int] + # If the row doesn't exist in the `sliding_sync_joined_rooms` table, we need to + # fully-insert it which means we also need to include a `bump_stamp` value to use + # for the row. This should only be populated when we're trying to fully-insert a + # row. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + joined_room_bump_stamp_to_fully_insert: Optional[int] + # Values to upsert into `sliding_sync_joined_rooms` + joined_room_updates: SlidingSyncStateInsertValues + + # Shared values to upsert into `sliding_sync_membership_snapshots` for each + # `to_insert_membership_snapshots` + membership_snapshot_shared_insert_values: ( + SlidingSyncMembershipSnapshotSharedInsertValues + ) + # List of membership to insert into `sliding_sync_membership_snapshots` + to_insert_membership_snapshots: List[SlidingSyncMembershipInfo] + # List of user_id to delete from `sliding_sync_membership_snapshots` + to_delete_membership_snapshots: List[str] + + @attr.s(slots=True, auto_attribs=True) class NewEventChainLinks: """Information about new auth chain links that need to be added to the DB. 
@@ -226,6 +332,14 @@ class PersistEventsStore: event.internal_metadata.stream_ordering = stream event.internal_metadata.instance_name = self._instance_name + sliding_sync_table_changes = None + if state_delta_for_room is not None: + sliding_sync_table_changes = ( + await self._calculate_sliding_sync_table_changes( + room_id, events_and_contexts, state_delta_for_room + ) + ) + await self.db_pool.runInteraction( "persist_events", self._persist_events_txn, @@ -235,6 +349,7 @@ class PersistEventsStore: state_delta_for_room=state_delta_for_room, new_forward_extremities=new_forward_extremities, new_event_links=new_event_links, + sliding_sync_table_changes=sliding_sync_table_changes, ) persist_event_counter.inc(len(events_and_contexts)) @@ -261,6 +376,341 @@ class PersistEventsStore: (room_id,), frozenset(new_forward_extremities) ) + async def _calculate_sliding_sync_table_changes( + self, + room_id: str, + events_and_contexts: Sequence[Tuple[EventBase, EventContext]], + delta_state: DeltaState, + ) -> SlidingSyncTableChanges: + """ + Calculate the changes to the `sliding_sync_membership_snapshots` and + `sliding_sync_joined_rooms` tables given the deltas that are going to be used to + update the `current_state_events` table. + + Just a bunch of pre-processing so we so we don't need to spend time in the + transaction itself gathering all of this info. It's also easier to deal with + redactions outside of a transaction. + + Args: + room_id: The room ID currently being processed. + events_and_contexts: List of tuples of (event, context) being persisted. + This is completely optional (you can pass an empty list) and will just + save us from fetching the events from the database if we already have + them. We assume the list is sorted ascending by `stream_ordering`. We + don't care about the sort when the events are backfilled (with negative + `stream_ordering`). + delta_state: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. + """ + to_insert = delta_state.to_insert + to_delete = delta_state.to_delete + + # If no state is changing, we don't need to do anything. This can happen when a + # partial-stated room is re-syncing the current state. 
+ if not to_insert and not to_delete: + return SlidingSyncTableChanges( + room_id=room_id, + joined_room_best_effort_most_recent_stream_ordering=None, + joined_room_bump_stamp_to_fully_insert=None, + joined_room_updates={}, + membership_snapshot_shared_insert_values={}, + to_insert_membership_snapshots=[], + to_delete_membership_snapshots=[], + ) + + event_map = {event.event_id: event for event, _ in events_and_contexts} + + # Handle gathering info for the `sliding_sync_membership_snapshots` table + # + # This would only happen if someone was state reset out of the room + user_ids_to_delete_membership_snapshots = [ + state_key + for event_type, state_key in to_delete + if event_type == EventTypes.Member and self.is_mine_id(state_key) + ] + + membership_snapshot_shared_insert_values: ( + SlidingSyncMembershipSnapshotSharedInsertValues + ) = {} + membership_infos_to_insert_membership_snapshots: List[ + SlidingSyncMembershipInfo + ] = [] + if to_insert: + membership_event_id_to_user_id_map: Dict[str, str] = {} + for state_key, event_id in to_insert.items(): + if state_key[0] == EventTypes.Member and self.is_mine_id(state_key[1]): + membership_event_id_to_user_id_map[event_id] = state_key[1] + + membership_event_map: Dict[str, EventBase] = {} + # In normal event persist scenarios, we should be able to find the + # membership events in the `events_and_contexts` given to us but it's + # possible a state reset happened which added us to the room without a + # corresponding new membership event (reset back to a previous membership). + missing_membership_event_ids: Set[str] = set() + for membership_event_id in membership_event_id_to_user_id_map.keys(): + membership_event = event_map.get(membership_event_id) + if membership_event: + membership_event_map[membership_event_id] = membership_event + else: + missing_membership_event_ids.add(membership_event_id) + + # Otherwise, we need to find a couple events that we were reset to. 
+ if missing_membership_event_ids: + remaining_events = await self.store.get_events( + missing_membership_event_ids + ) + # There shouldn't be any missing events + assert ( + remaining_events.keys() == missing_membership_event_ids + ), missing_membership_event_ids.difference(remaining_events.keys()) + membership_event_map.update(remaining_events) + + for ( + membership_event_id, + user_id, + ) in membership_event_id_to_user_id_map.items(): + # We should only be seeing events with `stream_ordering`/`instance_name` assigned by this point + membership_event_stream_ordering = membership_event_map[ + membership_event_id + ].internal_metadata.stream_ordering + assert membership_event_stream_ordering is not None + membership_event_instance_name = membership_event_map[ + membership_event_id + ].internal_metadata.instance_name + assert membership_event_instance_name is not None + + membership_infos_to_insert_membership_snapshots.append( + SlidingSyncMembershipInfo( + user_id=user_id, + sender=membership_event_map[membership_event_id].sender, + membership_event_id=membership_event_id, + membership=membership_event_map[membership_event_id].membership, + membership_event_stream_ordering=membership_event_stream_ordering, + membership_event_instance_name=membership_event_instance_name, + ) + ) + + if membership_infos_to_insert_membership_snapshots: + current_state_ids_map: MutableStateMap[str] = dict( + await self.store.get_partial_filtered_current_state_ids( + room_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + ) + ) + # Since we fetched the current state before we took `to_insert`/`to_delete` + # into account, we need to do a couple fixups. + # + # Update the current_state_map with what we have `to_delete` + for state_key in to_delete: + current_state_ids_map.pop(state_key, None) + # Update the current_state_map with what we have `to_insert` + for state_key, event_id in to_insert.items(): + if state_key in SLIDING_SYNC_RELEVANT_STATE_SET: + current_state_ids_map[state_key] = event_id + + current_state_map: MutableStateMap[EventBase] = {} + # In normal event persist scenarios, we probably won't be able to find + # these state events in `events_and_contexts` since we don't generally + # batch up local membership changes with other events, but it can + # happen. + missing_state_event_ids: Set[str] = set() + for state_key, event_id in current_state_ids_map.items(): + event = event_map.get(event_id) + if event: + current_state_map[state_key] = event + else: + missing_state_event_ids.add(event_id) + + # Otherwise, we need to find a couple events + if missing_state_event_ids: + remaining_events = await self.store.get_events( + missing_state_event_ids + ) + # There shouldn't be any missing events + assert ( + remaining_events.keys() == missing_state_event_ids + ), missing_state_event_ids.difference(remaining_events.keys()) + for event in remaining_events.values(): + current_state_map[(event.type, event.state_key)] = event + + if current_state_map: + state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + membership_snapshot_shared_insert_values.update(state_insert_values) + # We have current state to work from + membership_snapshot_shared_insert_values["has_known_state"] = True + else: + # We don't have any `current_state_events` anymore (previously + # cleared out because of `no_longer_in_room`). This can happen if + # one user is joined and another is invited (some non-join + # membership). 
If the joined user leaves, we are `no_longer_in_room` + # and `current_state_events` is cleared out. When the invited user + # rejects the invite (leaves the room), we will end up here. + # + # In these cases, we should inherit the meta data from the previous + # snapshot so we shouldn't update any of the state values. When + # using sliding sync filters, this will prevent the room from + # disappearing/appearing just because you left the room. + # + # Ideally, we could additionally assert that we're only here for + # valid non-join membership transitions. + assert delta_state.no_longer_in_room + + # Handle gathering info for the `sliding_sync_joined_rooms` table + # + # We only deal with + # updating the state related columns. The + # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event + # persisting stack (see + # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) + # + joined_room_updates: SlidingSyncStateInsertValues = {} + best_effort_most_recent_stream_ordering: Optional[int] = None + bump_stamp_to_fully_insert: Optional[int] = None + if not delta_state.no_longer_in_room: + current_state_ids_map = {} + + # Always fully-insert rows if they don't already exist in the + # `sliding_sync_joined_rooms` table. This way we can rely on a row if it + # exists in the table. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + existing_row_in_table = await self.store.db_pool.simple_select_one_onecol( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + retcol="room_id", + allow_none=True, + ) + if not existing_row_in_table: + most_recent_bump_event_pos_results = ( + await self.store.get_last_event_pos_in_room( + room_id, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + bump_stamp_to_fully_insert = ( + most_recent_bump_event_pos_results[1].stream + if most_recent_bump_event_pos_results is not None + else None + ) + + current_state_ids_map = dict( + await self.store.get_partial_filtered_current_state_ids( + room_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + ) + ) + + # Look through the items we're going to insert into the current state to see + # if there is anything that we care about and should also update in the + # `sliding_sync_joined_rooms` table. + for state_key, event_id in to_insert.items(): + if state_key in SLIDING_SYNC_RELEVANT_STATE_SET: + current_state_ids_map[state_key] = event_id + + # Get the full event objects for the current state events + # + # In normal event persist scenarios, we should be able to find the state + # events in the `events_and_contexts` given to us but it's possible a state + # reset happened which that reset back to a previous state. + current_state_map = {} + missing_event_ids: Set[str] = set() + for state_key, event_id in current_state_ids_map.items(): + event = event_map.get(event_id) + if event: + current_state_map[state_key] = event + else: + missing_event_ids.add(event_id) + + # Otherwise, we need to find a couple events that we were reset to. 
+ if missing_event_ids: + remaining_events = await self.store.get_events( + missing_event_ids + ) + # There shouldn't be any missing events + assert ( + remaining_events.keys() == missing_event_ids + ), missing_event_ids.difference(remaining_events.keys()) + for event in remaining_events.values(): + current_state_map[(event.type, event.state_key)] = event + + joined_room_updates = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + ) + + # If something is being deleted from the state, we need to clear it out + for state_key in to_delete: + if state_key == (EventTypes.Create, ""): + joined_room_updates["room_type"] = None + elif state_key == (EventTypes.RoomEncryption, ""): + joined_room_updates["is_encrypted"] = False + elif state_key == (EventTypes.Name, ""): + joined_room_updates["room_name"] = None + + # Figure out `best_effort_most_recent_stream_ordering`. This doesn't need to + # be perfect, we just need *some* answer that points to a real event in the + # room in case we are the first ones inserting into the + # `sliding_sync_joined_rooms` table because of the `NON NULL` constraint on + # `event_stream_ordering`. In reality, + # `_update_sliding_sync_tables_with_new_persisted_events_txn()` is run after + # `_update_current_state_txn()` whenever a new event is persisted to update + # it to the correct latest value. + # + if len(events_and_contexts) > 0: + # Since the list is sorted ascending by `stream_ordering`, the last event + # should have the highest `stream_ordering`. + best_effort_most_recent_stream_ordering = events_and_contexts[-1][ + 0 + ].internal_metadata.stream_ordering + else: + # If there are no `events_and_contexts`, we assume it's one of two scenarios: + # 1. If there are new state `to_insert` but no `events_and_contexts`, + # then it's a state reset. + # 2. Otherwise, it's some partial-state room re-syncing the current state and + # going through un-partial process. + # + # Either way, we assume no new events are being persisted and we can + # find the latest already in the database. Since this is a best-effort + # value, we don't need to be perfect although I think we're pretty close + # here. + most_recent_event_pos_results = ( + await self.store.get_last_event_pos_in_room( + room_id, event_types=None + ) + ) + assert most_recent_event_pos_results, ( + f"We should not be seeing `None` here because we are still in the room ({room_id}) and " + + "it should at-least have a join membership event that's keeping us here."
+ ) + best_effort_most_recent_stream_ordering = most_recent_event_pos_results[ + 1 + ].stream + + # We should have found a value if we are still in the room + assert best_effort_most_recent_stream_ordering is not None + + return SlidingSyncTableChanges( + room_id=room_id, + # For `sliding_sync_joined_rooms` + joined_room_best_effort_most_recent_stream_ordering=best_effort_most_recent_stream_ordering, + joined_room_bump_stamp_to_fully_insert=bump_stamp_to_fully_insert, + joined_room_updates=joined_room_updates, + # For `sliding_sync_membership_snapshots` + membership_snapshot_shared_insert_values=membership_snapshot_shared_insert_values, + to_insert_membership_snapshots=membership_infos_to_insert_membership_snapshots, + to_delete_membership_snapshots=user_ids_to_delete_membership_snapshots, + ) + async def calculate_chain_cover_index_for_events( self, room_id: str, events: Collection[EventBase] ) -> Dict[str, NewEventChainLinks]: @@ -458,6 +908,7 @@ class PersistEventsStore: state_delta_for_room: Optional[DeltaState], new_forward_extremities: Optional[Set[str]], new_event_links: Dict[str, NewEventChainLinks], + sliding_sync_table_changes: Optional[SlidingSyncTableChanges], ) -> None: """Insert some number of room events into the necessary database tables. @@ -478,9 +929,14 @@ delete_existing True to purge existing table rows for the events from the database. This is useful when retrying due to IntegrityError. - state_delta_for_room: The current-state delta for the room. + state_delta_for_room: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. new_forward_extremities: The new forward extremities for the room: a set of the event ids which are the forward extremities. + sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) Raises: PartialStateConflictError: if attempting to persist a partial state event in @@ -590,10 +1046,22 @@ # room_memberships, where applicable. # NB: This function invalidates all state related caches if state_delta_for_room: + # If the state delta exists, the sliding sync table changes should also exist + assert sliding_sync_table_changes is not None + self._update_current_state_txn( - txn, room_id, state_delta_for_room, min_stream_order + txn, + room_id, + state_delta_for_room, + min_stream_order, + sliding_sync_table_changes, ) + # We only update the sliding sync tables for non-backfilled events. + self._update_sliding_sync_tables_with_new_persisted_events_txn( + txn, room_id, events_and_contexts + ) + def _persist_event_auth_chain_txn( self, txn: LoggingTransaction, @@ -1128,8 +1596,20 @@ self, room_id: str, state_delta: DeltaState, + sliding_sync_table_changes: SlidingSyncTableChanges, ) -> None: - """Update the current state stored in the datatabase for the given room""" + """ + Update the current state stored in the database for the given room + + Args: + room_id + state_delta: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room.
+ sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) + """ if state_delta.is_noop(): return @@ -1141,6 +1621,7 @@ class PersistEventsStore: room_id, delta_state=state_delta, stream_id=stream_ordering, + sliding_sync_table_changes=sliding_sync_table_changes, ) def _update_current_state_txn( @@ -1149,16 +1630,34 @@ class PersistEventsStore: room_id: str, delta_state: DeltaState, stream_id: int, + sliding_sync_table_changes: SlidingSyncTableChanges, ) -> None: + """ + Handles updating tables that track the current state of a room. + + Args: + txn + room_id + delta_state: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. + stream_id: TODO + sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) + """ to_delete = delta_state.to_delete to_insert = delta_state.to_insert + # Sanity check we're processing the same thing + assert room_id == sliding_sync_table_changes.room_id + # Figure out the changes of membership to invalidate the # `get_rooms_for_user` cache. # We find out which membership events we may have deleted # and which we have added, then we invalidate the caches for all # those users. - members_changed = { + members_to_cache_bust = { state_key for ev_type, state_key in itertools.chain(to_delete, to_insert) if ev_type == EventTypes.Member @@ -1182,16 +1681,22 @@ class PersistEventsStore: """ txn.execute(sql, (stream_id, self._instance_name, room_id)) + # Grab the list of users before we clear out the current state + users_in_room = self.store.get_users_in_room_txn(txn, room_id) # We also want to invalidate the membership caches for users # that were in the room. - users_in_room = self.store.get_users_in_room_txn(txn, room_id) - members_changed.update(users_in_room) + members_to_cache_bust.update(users_in_room) self.db_pool.simple_delete_txn( txn, table="current_state_events", keyvalues={"room_id": room_id}, ) + self.db_pool.simple_delete_txn( + txn, + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + ) else: # We're still in the room, so we update the current state as normal. @@ -1260,6 +1765,41 @@ class PersistEventsStore: ], ) + # Handle updating the `sliding_sync_joined_rooms` table. We only deal with + # updating the state related columns. The + # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event + # persisting stack (see + # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) + # + # We only need to update when one of the relevant state values has changed + if sliding_sync_table_changes.joined_room_updates: + # This should be *some* value that points to a real event in the room if + # we are still joined to the room. + assert ( + sliding_sync_table_changes.joined_room_best_effort_most_recent_stream_ordering + is not None + ) + + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + values=sliding_sync_table_changes.joined_room_updates, + insertion_values={ + # The reason we're only *inserting* (not *updating*) + # `event_stream_ordering` here is because the column has a `NON + # NULL` constraint and we need *some* answer. 
And if the row + # already exists, it already has the correct value and it's + # better to just rely on + # `_update_sliding_sync_tables_with_new_persisted_events_txn()` + # to do the right thing (same for `bump_stamp`). + "event_stream_ordering": sliding_sync_table_changes.joined_room_best_effort_most_recent_stream_ordering, + # If we're trying to fully-insert a row, we need to provide a + # value for `bump_stamp` if it exists for the room. + "bump_stamp": sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert, + }, + ) + # We now update `local_current_membership`. We do this regardless # of whether we're still in the room or not to handle the case where # e.g. we just got banned (where we need to record that fact here). @@ -1296,6 +1836,60 @@ class PersistEventsStore: ], ) + # Handle updating the `sliding_sync_membership_snapshots` table + # + # This would only happen if someone was state reset out of the room + if sliding_sync_table_changes.to_delete_membership_snapshots: + self.db_pool.simple_delete_many_txn( + txn, + table="sliding_sync_membership_snapshots", + column="user_id", + values=sliding_sync_table_changes.to_delete_membership_snapshots, + keyvalues={"room_id": room_id}, + ) + + # We do this regardless of whether the server is `no_longer_in_room` or not + # because we still want a row if a local user was just left/kicked or got banned + # from the room. + if sliding_sync_table_changes.to_insert_membership_snapshots: + # Update the `sliding_sync_membership_snapshots` table + # + # We need to insert/update regardless of whether we have `sliding_sync_snapshot_keys` + # because there are other fields in the `ON CONFLICT` upsert to run (see + # inherit case above for more context when this happens). + self.db_pool.simple_upsert_many_txn( + txn=txn, + table="sliding_sync_membership_snapshots", + key_names=("room_id", "user_id"), + key_values=[ + (room_id, membership_info.user_id) + for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots + ], + value_names=[ + "sender", + "membership_event_id", + "membership", + "event_stream_ordering", + "event_instance_name", + ] + + list( + sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() + ), + value_values=[ + [ + membership_info.sender, + membership_info.membership_event_id, + membership_info.membership, + membership_info.membership_event_stream_ordering, + membership_info.membership_event_instance_name, + ] + + list( + sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() + ) + for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots + ], + ) + txn.call_after( self.store._curr_state_delta_stream_cache.entity_has_changed, room_id, @@ -1303,14 +1897,303 @@ class PersistEventsStore: ) # Invalidate the various caches - self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed) + self.store._invalidate_state_caches_and_stream( + txn, room_id, members_to_cache_bust + ) # Check if any of the remote membership changes requires us to # unsubscribe from their device lists. 
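A quick aside on the `values=`/`insertion_values=` split that the upsert above (and several upserts later in this patch) relies on. A toy in-memory equivalent of the semantics, using a hypothetical helper rather than the real `DatabasePool` API:

```python
def upsert(table: dict, key: str, values: dict, insertion_values: dict) -> None:
    # `values` apply on both insert and update; `insertion_values` only take
    # effect when the row is first created.
    row = table.get(key)
    if row is None:
        table[key] = {**insertion_values, **values}
    else:
        row.update(values)  # insertion_values are ignored for existing rows

rooms: dict = {}
upsert(rooms, "!r:hs", values={"room_name": "A"}, insertion_values={"event_stream_ordering": 10})
upsert(rooms, "!r:hs", values={"room_name": "B"}, insertion_values={"event_stream_ordering": 99})
# The second call updated the name but did not clobber the existing stream ordering:
assert rooms["!r:hs"] == {"event_stream_ordering": 10, "room_name": "B"}
```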
self.store.handle_potentially_left_users_txn( - txn, {m for m in members_changed if not self.hs.is_mine_id(m)} + txn, {m for m in members_to_cache_bust if not self.hs.is_mine_id(m)} ) + @classmethod + def _get_relevant_sliding_sync_current_state_event_ids_txn( + cls, txn: LoggingTransaction, room_id: str + ) -> MutableStateMap[str]: + """ + Fetch the current state event IDs for the relevant (to the + `sliding_sync_joined_rooms` table) state types for the given room. + + Returns: + A StateMap of the event IDs necessary to fetch the relevant state values + needed to insert into the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + """ + # Fetch the current state event IDs from the database + ( + event_type_and_state_key_in_list_clause, + event_type_and_state_key_args, + ) = make_tuple_in_list_sql_clause( + txn.database_engine, + ("type", "state_key"), + SLIDING_SYNC_RELEVANT_STATE_SET, + ) + txn.execute( + f""" + SELECT c.event_id, c.type, c.state_key + FROM current_state_events AS c + WHERE + c.room_id = ? + AND {event_type_and_state_key_in_list_clause} + """, + [room_id] + event_type_and_state_key_args, + ) + current_state_map: MutableStateMap[str] = { + (event_type, state_key): event_id for event_id, event_type, state_key in txn + } + + return current_state_map + + @classmethod + def _get_sliding_sync_insert_values_from_state_map( + cls, state_map: StateMap[EventBase] + ) -> SlidingSyncStateInsertValues: + """ + Extract the relevant state values from the `state_map` needed to insert into the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + + Returns: + Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant + state values needed to insert into + the `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + """ + # Map of values to insert/update in the `sliding_sync_membership_snapshots` table + sliding_sync_insert_map: SlidingSyncStateInsertValues = {} + + # Parse the raw event JSON + for state_key, event in state_map.items(): + if state_key == (EventTypes.Create, ""): + room_type = event.content.get(EventContentFields.ROOM_TYPE) + # Scrutinize JSON values + if room_type is None or isinstance(room_type, str): + sliding_sync_insert_map["room_type"] = room_type + elif state_key == (EventTypes.RoomEncryption, ""): + encryption_algorithm = event.content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) + is_encrypted = encryption_algorithm is not None + sliding_sync_insert_map["is_encrypted"] = is_encrypted + elif state_key == (EventTypes.Name, ""): + room_name = event.content.get(EventContentFields.ROOM_NAME) + # Scrutinize JSON values + if room_name is None or isinstance(room_name, str): + sliding_sync_insert_map["room_name"] = room_name + elif state_key == (EventTypes.Tombstone, ""): + successor_room_id = event.content.get( + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM + ) + # Scrutinize JSON values + if successor_room_id is None or isinstance(successor_room_id, str): + sliding_sync_insert_map["tombstone_successor_room_id"] = ( + successor_room_id + ) + else: + # We only expect to see events according to the + # `SLIDING_SYNC_RELEVANT_STATE_SET`.
+ raise AssertionError( + "Unexpected event (we should not be fetching extra events or this " + + "piece of code needs to be updated to handle a new event type added " + + f"to `SLIDING_SYNC_RELEVANT_STATE_SET`): {state_key} {event.event_id}" + ) + + return sliding_sync_insert_map + + @classmethod + def _get_sliding_sync_insert_values_from_stripped_state( + cls, unsigned_stripped_state_events: Any + ) -> SlidingSyncMembershipSnapshotSharedInsertValues: + """ + Pull out the relevant state values from the stripped state on an invite or knock + membership event needed to insert into the `sliding_sync_membership_snapshots` + tables. + + Returns: + Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant + state values needed to insert into the `sliding_sync_membership_snapshots` tables. + """ + # Map of values to insert/update in the `sliding_sync_membership_snapshots` table + sliding_sync_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {} + + if unsigned_stripped_state_events is not None: + stripped_state_map: MutableStateMap[StrippedStateEvent] = {} + if isinstance(unsigned_stripped_state_events, list): + for raw_stripped_event in unsigned_stripped_state_events: + stripped_state_event = parse_stripped_state_event( + raw_stripped_event + ) + if stripped_state_event is not None: + stripped_state_map[ + ( + stripped_state_event.type, + stripped_state_event.state_key, + ) + ] = stripped_state_event + + # If there is some stripped state, we assume the remote server passed *all* + # of the potential stripped state events for the room. + create_stripped_event = stripped_state_map.get((EventTypes.Create, "")) + # Sanity check that we at-least have the create event + if create_stripped_event is not None: + sliding_sync_insert_map["has_known_state"] = True + + # XXX: Keep this up-to-date with `SLIDING_SYNC_RELEVANT_STATE_SET` + + # Find the room_type + sliding_sync_insert_map["room_type"] = ( + create_stripped_event.content.get(EventContentFields.ROOM_TYPE) + if create_stripped_event is not None + else None + ) + + # Find whether the room is_encrypted + encryption_stripped_event = stripped_state_map.get( + (EventTypes.RoomEncryption, "") + ) + encryption = ( + encryption_stripped_event.content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) + if encryption_stripped_event is not None + else None + ) + sliding_sync_insert_map["is_encrypted"] = encryption is not None + + # Find the room_name + room_name_stripped_event = stripped_state_map.get((EventTypes.Name, "")) + sliding_sync_insert_map["room_name"] = ( + room_name_stripped_event.content.get(EventContentFields.ROOM_NAME) + if room_name_stripped_event is not None + else None + ) + + # Find the tombstone_successor_room_id + # Note: This isn't one of the stripped state events according to the spec + # but seems like there is no reason not to support this kind of thing.
+ tombstone_stripped_event = stripped_state_map.get( + (EventTypes.Tombstone, "") + ) + sliding_sync_insert_map["tombstone_successor_room_id"] = ( + tombstone_stripped_event.content.get( + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM + ) + if tombstone_stripped_event is not None + else None + ) + + else: + # No stripped state provided + sliding_sync_insert_map["has_known_state"] = False + sliding_sync_insert_map["room_type"] = None + sliding_sync_insert_map["room_name"] = None + sliding_sync_insert_map["is_encrypted"] = False + else: + # No stripped state provided + sliding_sync_insert_map["has_known_state"] = False + sliding_sync_insert_map["room_type"] = None + sliding_sync_insert_map["room_name"] = None + sliding_sync_insert_map["is_encrypted"] = False + + return sliding_sync_insert_map + + def _update_sliding_sync_tables_with_new_persisted_events_txn( + self, + txn: LoggingTransaction, + room_id: str, + events_and_contexts: List[Tuple[EventBase, EventContext]], + ) -> None: + """ + Update the latest `event_stream_ordering`/`bump_stamp` columns in the + `sliding_sync_joined_rooms` table for the room with new events. + + This function assumes that `_store_event_txn()` (to persist the event) and + `_update_current_state_txn(...)` (so that `sliding_sync_joined_rooms` table has + been updated with rooms that were joined) have already been run. + + Args: + txn + room_id: The room that all of the events belong to + events_and_contexts: The events being persisted. We assume the list is + sorted ascending by `stream_ordering`. We don't care about the sort when the + events are backfilled (with negative `stream_ordering`). + """ + + # Nothing to do if there are no events + if len(events_and_contexts) == 0: + return + + # We only update the sliding sync tables for non-backfilled events. + # + # Check if the first event is a backfilled event (with a negative + # `stream_ordering`). If one event is backfilled, we assume this whole batch was + # backfilled. + first_event_stream_ordering = events_and_contexts[0][ + 0 + ].internal_metadata.stream_ordering + # This should exist for persisted events + assert first_event_stream_ordering is not None + if first_event_stream_ordering < 0: + return + + # Since the list is sorted ascending by `stream_ordering`, the last event should + # have the highest `stream_ordering`. + max_stream_ordering = events_and_contexts[-1][ + 0 + ].internal_metadata.stream_ordering + max_bump_stamp = None + for event, _ in reversed(events_and_contexts): + # Sanity check that all events belong to the same room + assert event.room_id == room_id + + if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + # This should exist for persisted events + assert event.internal_metadata.stream_ordering is not None + + max_bump_stamp = event.internal_metadata.stream_ordering + + # Since we're iterating in reverse, we can break as soon as we find a + # matching bump event which should have the highest `stream_ordering`. + break + + # We should have exited earlier if there were no events + assert ( + max_stream_ordering is not None + ), "Expected to have a stream_ordering if we have events" + + # Handle updating the `sliding_sync_joined_rooms` table. + # + txn.execute( + """ + UPDATE sliding_sync_joined_rooms + SET + event_stream_ordering = CASE + WHEN event_stream_ordering IS NULL OR event_stream_ordering < ? + THEN ? + ELSE event_stream_ordering + END, + bump_stamp = CASE + WHEN bump_stamp IS NULL OR bump_stamp < ? + THEN ? + ELSE bump_stamp + END + WHERE room_id = ? 
+ """, + ( + max_stream_ordering, + max_stream_ordering, + max_bump_stamp, + max_bump_stamp, + room_id, + ), + ) + # This may or may not update any rows depending if we are `no_longer_in_room` + def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None: """Update the room version in the database based off current state events. @@ -1931,7 +2814,9 @@ class PersistEventsStore: ) for event in events: + # Sanity check that we're working with persisted events assert event.internal_metadata.stream_ordering is not None + assert event.internal_metadata.instance_name is not None # We update the local_current_membership table only if the event is # "current", i.e., its something that has just happened. @@ -1945,6 +2830,16 @@ class PersistEventsStore: and event.internal_metadata.is_outlier() and event.internal_metadata.is_out_of_band_membership() ): + # The only sort of out-of-band-membership events we expect to see here + # are remote invites/knocks and LEAVE events corresponding to + # rejected/retracted invites and rescinded knocks. + assert event.type == EventTypes.Member + assert event.membership in ( + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + ) + self.db_pool.simple_upsert_txn( txn, table="local_current_membership", @@ -1956,6 +2851,56 @@ class PersistEventsStore: }, ) + # Handle updating the `sliding_sync_membership_snapshots` table + # (out-of-band membership events only) + # + raw_stripped_state_events = None + if event.membership == Membership.INVITE: + invite_room_state = event.unsigned.get("invite_room_state") + raw_stripped_state_events = invite_room_state + elif event.membership == Membership.KNOCK: + knock_room_state = event.unsigned.get("knock_room_state") + raw_stripped_state_events = knock_room_state + + insert_values = { + "sender": event.sender, + "membership_event_id": event.event_id, + "membership": event.membership, + "event_stream_ordering": event.internal_metadata.stream_ordering, + "event_instance_name": event.internal_metadata.instance_name, + } + if event.membership == Membership.LEAVE: + # Inherit the meta data from the remote invite/knock. When using + # sliding sync filters, this will prevent the room from + # disappearing/appearing just because you left the room. + pass + elif event.membership in (Membership.INVITE, Membership.KNOCK): + extra_insert_values = ( + self._get_sliding_sync_insert_values_from_stripped_state( + raw_stripped_state_events + ) + ) + insert_values.update(extra_insert_values) + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. 
+ # assert_never(event.membership) + raise AssertionError( + f"Unexpected out-of-band membership {event.membership} ({event.event_id}) that we don't know how to handle yet" + ) + + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={ + "room_id": event.room_id, + "user_id": event.state_key, + }, + values=insert_values, + ) + def _handle_event_relations( self, txn: LoggingTransaction, event: EventBase ) -> None: diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 64d303e330..88ff5aa2df 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -24,9 +24,9 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast import attr -from synapse.api.constants import EventContentFields, RelationTypes +from synapse.api.constants import EventContentFields, Membership, RelationTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -34,9 +34,21 @@ from synapse.storage.database import ( LoggingTransaction, make_tuple_comparison_clause, ) -from synapse.storage.databases.main.events import PersistEventsStore +from synapse.storage.databases.main.events import ( + SLIDING_SYNC_RELEVANT_STATE_SET, + PersistEventsStore, + SlidingSyncMembershipInfo, + SlidingSyncMembershipSnapshotSharedInsertValues, + SlidingSyncStateInsertValues, +) +from synapse.storage.databases.main.state_deltas import StateDeltasStore +from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.types import Cursor -from synapse.types import JsonDict, StrCollection +from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.state import StateFilter +from synapse.util import json_encoder +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -78,6 +90,14 @@ class _BackgroundUpdates: EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" + SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE = ( + "sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update" + ) + SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE = "sliding_sync_joined_rooms_bg_update" + SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = ( + "sliding_sync_membership_snapshots_bg_update" + ) + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -97,7 +117,19 @@ class _CalculateChainCover: finished_room_map: Dict[str, Tuple[int, int]] -class EventsBackgroundUpdatesStore(SQLBaseStore): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _JoinedRoomStreamOrderingUpdate: + """ + Intermediate container class used in `SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE` + """ + + # The most recent event stream_ordering for the room + most_recent_event_stream_ordering: int + # The most recent event `bump_stamp` for the room + most_recent_bump_stamp: Optional[int] + + +class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore): def __init__( self, database: DatabasePool, @@ -279,6 +311,34 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): where_clause="NOT outlier", ) + # Handle background 
updates for Sliding Sync tables + # + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + self._sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update, + ) + # Add some background updates to populate the sliding sync tables + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + self._sliding_sync_joined_rooms_bg_update, + ) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + self._sliding_sync_membership_snapshots_bg_update, + ) + + # We want this to run on the main database at startup before we start processing + # events. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + with db_conn.cursor(txn_name="resolve_sliding_sync") as txn: + _resolve_stale_data_in_sliding_sync_tables( + txn=txn, + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: @@ -1073,7 +1133,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): PersistEventsStore._add_chain_cover_index( txn, self.db_pool, - self.event_chain_id_gen, # type: ignore[attr-defined] + self.event_chain_id_gen, event_to_room_id, event_to_types, cast(Dict[str, StrCollection], event_to_auth_chain), @@ -1516,3 +1576,961 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ) return batch_size + + async def _sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update( + self, progress: JsonDict, _batch_size: int + ) -> int: + """ + Prefill `sliding_sync_joined_rooms_to_recalculate` table with all rooms we know about already. + """ + + def _txn(txn: LoggingTransaction) -> None: + # We do this as one big bulk insert. This has been tested on a bigger + # homeserver with ~10M rooms and took 60s. There is potential for this to + # starve disk usage while this goes on. + # + # We upsert in case we have to run this multiple times. + # + # The `WHERE TRUE` clause is to avoid "Parsing Ambiguity" + txn.execute( + """ + INSERT INTO sliding_sync_joined_rooms_to_recalculate + (room_id) + SELECT room_id FROM rooms WHERE ? + ON CONFLICT (room_id) + DO NOTHING; + """, + (True,), + ) + + await self.db_pool.runInteraction( + "_sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update", + _txn, + ) + + # Background update is done. + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE + ) + return 0 + + async def _sliding_sync_joined_rooms_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the `sliding_sync_joined_rooms` table. + """ + # We don't need to fetch any progress state because we just grab the next N + # events in `sliding_sync_joined_rooms_to_recalculate` + + def _get_rooms_to_update_txn(txn: LoggingTransaction) -> List[Tuple[str]]: + """ + Returns: + A list of room ID's to update along with the progress value + (event_stream_ordering) indicating the continuation point in the + `current_state_events` table for the next batch. 
+ """ + # Fetch the set of room IDs that we want to update + # + # We use `current_state_events` table as the barometer for whether the + # server is still participating in the room because if we're + # `no_longer_in_room`, this table would be cleared out for the given + # `room_id`. + txn.execute( + """ + SELECT room_id + FROM sliding_sync_joined_rooms_to_recalculate + LIMIT ? + """, + (batch_size,), + ) + + rooms_to_update_rows = cast(List[Tuple[str]], txn.fetchall()) + + return rooms_to_update_rows + + rooms_to_update = await self.db_pool.runInteraction( + "_sliding_sync_joined_rooms_bg_update._get_rooms_to_update_txn", + _get_rooms_to_update_txn, + ) + + if not rooms_to_update: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE + ) + return 0 + + # Map from room_id to insert/update state values in the `sliding_sync_joined_rooms` table. + joined_room_updates: Dict[str, SlidingSyncStateInsertValues] = {} + # Map from room_id to stream_ordering/bump_stamp, etc values + joined_room_stream_ordering_updates: Dict[ + str, _JoinedRoomStreamOrderingUpdate + ] = {} + # As long as we get this value before we fetch the current state, we can use it + # to check if something has changed since that point. + most_recent_current_state_delta_stream_id = ( + await self.get_max_stream_id_in_current_state_deltas() + ) + for (room_id,) in rooms_to_update: + current_state_ids_map = await self.db_pool.runInteraction( + "_sliding_sync_joined_rooms_bg_update._get_relevant_sliding_sync_current_state_event_ids_txn", + PersistEventsStore._get_relevant_sliding_sync_current_state_event_ids_txn, + room_id, + ) + + # If we're not joined to the room a) it doesn't belong in the + # `sliding_sync_joined_rooms` table so we should skip and b) we won't have + # any `current_state_events` for the room. + if not current_state_ids_map: + continue + + fetched_events = await self.get_events(current_state_ids_map.values()) + + current_state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in current_state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } + + # Even if we are joined to the room, this can happen for unknown room + # versions (old room versions that aren't known anymore) since + # `get_events(...)` will filter out events for unknown room versions + if not current_state_map: + continue + + state_insert_values = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + ) + # We should have some insert values for each room, even if they are `None` + assert state_insert_values + joined_room_updates[room_id] = state_insert_values + + # Figure out the stream_ordering of the latest event in the room + most_recent_event_pos_results = await self.get_last_event_pos_in_room( + room_id, event_types=None + ) + assert most_recent_event_pos_results is not None, ( + f"We should not be seeing `None` here because the room ({room_id}) should at-least have a create event " + + "given we pulled the room out of `current_state_events`" + ) + most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream + assert most_recent_event_stream_ordering > 0, ( + "We should have at-least one event in the room (our own join membership event for example) " + + "that isn't backfilled (negative `stream_ordering`) if we are joined to the room." + ) + # Figure out the latest `bump_stamp` in the room. 
This could be `None` for a + # federated room you just joined where all of events are still `outliers` or + # backfilled history. In the Sliding Sync API, we default to the user's + # membership event `stream_ordering` if we don't have a `bump_stamp` so + # having it as `None` in this table is fine. + bump_stamp_event_pos_results = await self.get_last_event_pos_in_room( + room_id, event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES + ) + most_recent_bump_stamp = None + if ( + bump_stamp_event_pos_results is not None + and bump_stamp_event_pos_results[1].stream > 0 + ): + most_recent_bump_stamp = bump_stamp_event_pos_results[1].stream + + joined_room_stream_ordering_updates[room_id] = ( + _JoinedRoomStreamOrderingUpdate( + most_recent_event_stream_ordering=most_recent_event_stream_ordering, + most_recent_bump_stamp=most_recent_bump_stamp, + ) + ) + + def _fill_table_txn(txn: LoggingTransaction) -> None: + # Handle updating the `sliding_sync_joined_rooms` table + # + for ( + room_id, + update_map, + ) in joined_room_updates.items(): + joined_room_stream_ordering_update = ( + joined_room_stream_ordering_updates[room_id] + ) + event_stream_ordering = ( + joined_room_stream_ordering_update.most_recent_event_stream_ordering + ) + bump_stamp = joined_room_stream_ordering_update.most_recent_bump_stamp + + # Check if the current state has been updated since we gathered it. + # We're being careful not to insert/overwrite with stale data. + state_deltas_since_we_gathered_current_state = ( + self.get_current_state_deltas_for_room_txn( + txn, + room_id, + from_token=RoomStreamToken( + stream=most_recent_current_state_delta_stream_id + ), + to_token=None, + ) + ) + for state_delta in state_deltas_since_we_gathered_current_state: + # We only need to check for the state is relevant to the + # `sliding_sync_joined_rooms` table. + if ( + state_delta.event_type, + state_delta.state_key, + ) in SLIDING_SYNC_RELEVANT_STATE_SET: + # Raising exception so we can just exit and try again. It would + # be hard to resolve this within the transaction because we need + # to get full events out that take redactions into account. We + # could add some retry logic here, but it's easier to just let + # the background update try again. + raise Exception( + "Current state was updated after we gathered it to update " + + "`sliding_sync_joined_rooms` in the background update. " + + "Raising exception so we can just try again." + ) + + # Since we fully insert rows into `sliding_sync_joined_rooms`, we can + # just do everything on insert and `ON CONFLICT DO NOTHING`. + # + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={ + **update_map, + # The reason we're only *inserting* (not *updating*) `event_stream_ordering` + # and `bump_stamp` is because if they are present, that means they are already + # up-to-date. + "event_stream_ordering": event_stream_ordering, + "bump_stamp": bump_stamp, + }, + ) + + # Now that we've processed all the room, we can remove them from the + # queue. + # + # Note: we need to remove all the rooms from the queue we pulled out + # from the DB, not just the ones we've processed above. Otherwise + # we'll simply keep pulling out the same rooms over and over again. 
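The gather-then-verify shape of this background update (snapshot a high-water mark, do the slow reads outside the transaction, then re-check inside it and abort on concurrent changes) can be sketched in miniature as follows. All names here are hypothetical stand-ins, not real Synapse APIs:

```python
from typing import Dict

class StaleDataError(Exception):
    """Aborts the write; the background update simply retries the room later."""

class FakeStore:
    def __init__(self) -> None:
        self.state_delta_stream_id = 0  # bumped whenever current state changes
        self.current_state: Dict[str, Dict[str, str]] = {"!r:hs": {"room_name": "A"}}
        self.joined_rooms: Dict[str, Dict[str, str]] = {}

def recalculate_room(store: FakeStore, room_id: str) -> None:
    # 1. Record a high-water mark *before* the slow reads.
    watermark = store.state_delta_stream_id
    # 2. The expensive reads happen outside the write transaction.
    gathered = dict(store.current_state.get(room_id, {}))
    # 3. At write time, verify nothing relevant changed past the mark.
    if store.state_delta_stream_id > watermark:
        raise StaleDataError(room_id)
    store.joined_rooms[room_id] = gathered

store = FakeStore()
recalculate_room(store, "!r:hs")
assert store.joined_rooms["!r:hs"] == {"room_name": "A"}
```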
+ self.db_pool.simple_delete_many_batch_txn( + txn, + table="sliding_sync_joined_rooms_to_recalculate", + keys=("room_id",), + values=rooms_to_update, + ) + + await self.db_pool.runInteraction( + "sliding_sync_joined_rooms_bg_update", _fill_table_txn + ) + + return len(rooms_to_update) + + async def _sliding_sync_membership_snapshots_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the `sliding_sync_membership_snapshots` table. + """ + # We do this in two phases: a) the initial phase where we go through all + # room memberships, and then b) a second phase where we look at new + # memberships (this is to handle the case where we downgrade and then + # upgrade again). + # + # We have to do this as two phases (rather than just the second phase + # where we iterate on event_stream_ordering), as the + # `event_stream_ordering` column may have null values for old rows. + # Therefore we first do the set of historic rooms and *then* look at any + # new rows (which will have a non-null `event_stream_ordering`). + initial_phase = progress.get("initial_phase") + if initial_phase is None: + # If this is the first run, store the current max stream position. + # We know we will go through all memberships less than the current + # max in the initial phase. + progress = { + "initial_phase": True, + "last_event_stream_ordering": self.get_room_max_stream_ordering(), + } + await self.db_pool.updates._background_update_progress( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + progress, + ) + initial_phase = True + + last_room_id = progress.get("last_room_id", "") + last_event_stream_ordering = progress["last_event_stream_ordering"] + + def _find_memberships_to_update_txn( + txn: LoggingTransaction, + ) -> List[ + Tuple[str, Optional[str], str, str, str, str, int, Optional[str], bool] + ]: + # Fetch the set of event IDs that we want to update + + if initial_phase: + # There are some old out-of-band memberships (before + # https://github.com/matrix-org/synapse/issues/6983) where we don't have + # the corresponding room stored in the `rooms` table`. We use `LEFT JOIN + # rooms AS r USING (room_id)` to find the rooms missing from `rooms` and + # insert a row for them below. + txn.execute( + """ + SELECT + c.room_id, + r.room_id, + c.user_id, + e.sender, + c.event_id, + c.membership, + e.stream_ordering, + e.instance_name, + e.outlier + FROM local_current_membership AS c + INNER JOIN events AS e USING (event_id) + LEFT JOIN rooms AS r ON (c.room_id = r.room_id) + WHERE c.room_id > ? + ORDER BY c.room_id ASC + LIMIT ? + """, + (last_room_id, batch_size), + ) + elif last_event_stream_ordering is not None: + # It's important to sort by `event_stream_ordering` *ascending* (oldest to + # newest) so that if we see that this background update in progress and want + # to start the catch-up process, we can safely assume that it will + # eventually get to the rooms we want to catch-up on anyway (see + # `_resolve_stale_data_in_sliding_sync_tables()`). + # + # `c.room_id` is duplicated to make it match what we're doing in the + # `initial_phase`. But we can avoid doing the extra `rooms` table join + # because we can assume all of these new events won't have this problem. + txn.execute( + """ + SELECT + c.room_id, + c.room_id, + c.user_id, + e.sender, + c.event_id, + c.membership, + c.event_stream_ordering, + e.instance_name, + e.outlier + FROM local_current_membership AS c + INNER JOIN events AS e USING (event_id) + WHERE event_stream_ordering > ? 
+ ORDER BY event_stream_ordering ASC + LIMIT ? + """, + (last_event_stream_ordering, batch_size), + ) + else: + raise Exception("last_event_stream_ordering should not be None") + + memberships_to_update_rows = cast( + List[ + Tuple[ + str, Optional[str], str, str, str, str, int, Optional[str], bool + ] + ], + txn.fetchall(), + ) + + return memberships_to_update_rows + + memberships_to_update_rows = await self.db_pool.runInteraction( + "sliding_sync_membership_snapshots_bg_update._find_memberships_to_update_txn", + _find_memberships_to_update_txn, + ) + + if not memberships_to_update_rows: + if initial_phase: + # Move onto the next phase. + await self.db_pool.updates._background_update_progress( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + { + "initial_phase": False, + "last_event_stream_ordering": last_event_stream_ordering, + }, + ) + return 0 + else: + # We've finished both phases, we're done. + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE + ) + return 0 + + def _find_previous_membership_txn( + txn: LoggingTransaction, event_id: str, user_id: str + ) -> Tuple[str, str]: + # Find the previous invite/knock event before the leave event. This + # is done by looking at the auth events of the invite/knock and + # finding the corresponding membership event. + txn.execute( + """ + SELECT m.event_id, m.membership + FROM event_auth AS a + INNER JOIN room_memberships AS m ON (a.auth_id = m.event_id) + WHERE a.event_id = ? AND m.user_id = ? + """, + (event_id, user_id), + ) + row = txn.fetchone() + + # We should see a corresponding previous invite/knock event + assert row is not None + previous_event_id, membership = row + + return previous_event_id, membership + + # Map from (room_id, user_id) to ... + to_insert_membership_snapshots: Dict[ + Tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues + ] = {} + to_insert_membership_infos: Dict[Tuple[str, str], SlidingSyncMembershipInfo] = ( + {} + ) + for ( + room_id, + room_id_from_rooms_table, + user_id, + sender, + membership_event_id, + membership, + membership_event_stream_ordering, + membership_event_instance_name, + is_outlier, + ) in memberships_to_update_rows: + # We don't know how to handle `membership` values other than these. The + # code below would need to be updated. + assert membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + Membership.BAN, + ) + + # There are some old out-of-band memberships (before + # https://github.com/matrix-org/synapse/issues/6983) where we don't have the + # corresponding room stored in the `rooms` table`. We have a `FOREIGN KEY` + # constraint on the `sliding_sync_membership_snapshots` table so we have to + # fix-up these memberships by adding the room to the `rooms` table. + if room_id_from_rooms_table is None: + await self.db_pool.simple_insert( + table="rooms", + values={ + "room_id": room_id, + # Only out-of-band memberships are missing from the `rooms` + # table so that is the only type of membership we're dealing + # with here. Since we don't calculate the "chain cover" for + # out-of-band memberships, we can just set this to `True` as if + # the user ever joins the room, we will end up calculating the + # "chain cover" anyway. 
+ "has_auth_chain_index": True, + }, + ) + + # Map of values to insert/update in the `sliding_sync_membership_snapshots` table + sliding_sync_membership_snapshots_insert_map: ( + SlidingSyncMembershipSnapshotSharedInsertValues + ) = {} + if membership == Membership.JOIN: + # If we're still joined, we can pull from current state. + current_state_ids_map: StateMap[ + str + ] = await self.hs.get_storage_controllers().state.get_current_state_ids( + room_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + # Partially-stated rooms should have all state events except for + # remote membership events so we don't need to wait at all because + # we only want some non-membership state + await_full_state=False, + ) + # We're iterating over rooms that we are joined to so they should + # have `current_state_events` and we should have some current state + # for each room + assert current_state_ids_map + + fetched_events = await self.get_events(current_state_ids_map.values()) + + current_state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in current_state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } + + # Can happen for unknown room versions (old room versions that aren't known + # anymore) since `get_events(...)` will filter out events for unknown room + # versions + if not current_state_map: + continue + + state_insert_values = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + ) + sliding_sync_membership_snapshots_insert_map.update(state_insert_values) + # We should have some insert values for each room, even if they are `None` + assert sliding_sync_membership_snapshots_insert_map + + # We have current state to work from + sliding_sync_membership_snapshots_insert_map["has_known_state"] = True + elif membership in (Membership.INVITE, Membership.KNOCK) or ( + membership == Membership.LEAVE and is_outlier + ): + invite_or_knock_event_id = membership_event_id + invite_or_knock_membership = membership + + # If the event is an `out_of_band_membership` (special case of + # `outlier`), we never had historical state so we have to pull from + # the stripped state on the previous invite/knock event. This gives + # us a consistent view of the room state regardless of your + # membership (i.e. the room shouldn't disappear if your using the + # `is_encrypted` filter and you leave). 
+ if membership == Membership.LEAVE and is_outlier: + invite_or_knock_event_id, invite_or_knock_membership = ( + await self.db_pool.runInteraction( + "sliding_sync_membership_snapshots_bg_update._find_previous_membership", + _find_previous_membership_txn, + membership_event_id, + user_id, + ) + ) + + # Pull from the stripped state on the invite/knock event + invite_or_knock_event = await self.get_event(invite_or_knock_event_id) + + raw_stripped_state_events = None + if invite_or_knock_membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif invite_or_knock_membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + + sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state( + raw_stripped_state_events + ) + + # We should have some insert values for each room, even if no + # stripped state is on the event because we still want to record + # that we have no known state + assert sliding_sync_membership_snapshots_insert_map + elif membership in (Membership.LEAVE, Membership.BAN): + # Pull from historical state + state_ids_map = await self.hs.get_storage_controllers().state.get_state_ids_for_event( + membership_event_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + # Partially-stated rooms should have all state events except for + # remote membership events so we don't need to wait at all because + # we only want some non-membership state + await_full_state=False, + ) + + fetched_events = await self.get_events(state_ids_map.values()) + + state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } + + # Can happen for unknown room versions (old room versions that aren't known + # anymore) since `get_events(...)` will filter out events for unknown room + # versions + if not state_map: + continue + + state_insert_values = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + state_map + ) + ) + sliding_sync_membership_snapshots_insert_map.update(state_insert_values) + # We should have some insert values for each room, even if they are `None` + assert sliding_sync_membership_snapshots_insert_map + + # We have historical state to work from + sliding_sync_membership_snapshots_insert_map["has_known_state"] = True + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. 
+ # assert_never(membership) + raise AssertionError( + f"Unexpected membership {membership} ({membership_event_id}) that we don't know how to handle yet" + ) + + to_insert_membership_snapshots[(room_id, user_id)] = ( + sliding_sync_membership_snapshots_insert_map + ) + to_insert_membership_infos[(room_id, user_id)] = SlidingSyncMembershipInfo( + user_id=user_id, + sender=sender, + membership_event_id=membership_event_id, + membership=membership, + membership_event_stream_ordering=membership_event_stream_ordering, + # If instance_name is null we default to "master" + membership_event_instance_name=membership_event_instance_name + or "master", + ) + + def _fill_table_txn(txn: LoggingTransaction) -> None: + # Handle updating the `sliding_sync_membership_snapshots` table + # + for key, insert_map in to_insert_membership_snapshots.items(): + room_id, user_id = key + membership_info = to_insert_membership_infos[key] + sender = membership_info.sender + membership_event_id = membership_info.membership_event_id + membership = membership_info.membership + membership_event_stream_ordering = ( + membership_info.membership_event_stream_ordering + ) + membership_event_instance_name = ( + membership_info.membership_event_instance_name + ) + + # We don't need to upsert the state because we never partially + # insert/update the snapshots and anything already there is up-to-date + # EXCEPT for the `forgotten` field since that is updated out-of-band + # from the membership changes. + # + # Even though we're only doing insertions, we're using + # `simple_upsert_txn()` here to avoid unique violation errors that would + # happen from `simple_insert_txn()` + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={"room_id": room_id, "user_id": user_id}, + values={}, + insertion_values={ + **insert_map, + "sender": sender, + "membership_event_id": membership_event_id, + "membership": membership, + "event_stream_ordering": membership_event_stream_ordering, + "event_instance_name": membership_event_instance_name, + }, + ) + # We need to find the `forgotten` value during the transaction because + # we can't risk inserting stale data. + txn.execute( + """ + UPDATE sliding_sync_membership_snapshots + SET + forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?) + WHERE room_id = ? and user_id = ? + """, + ( + membership_event_id, + room_id, + user_id, + ), + ) + + await self.db_pool.runInteraction( + "sliding_sync_membership_snapshots_bg_update", _fill_table_txn + ) + + # Update the progress + ( + room_id, + _room_id_from_rooms_table, + _user_id, + _sender, + _membership_event_id, + _membership, + membership_event_stream_ordering, + _membership_event_instance_name, + _is_outlier, + ) = memberships_to_update_rows[-1] + + progress = { + "initial_phase": initial_phase, + "last_room_id": room_id, + "last_event_stream_ordering": membership_event_stream_ordering, + } + + await self.db_pool.updates._background_update_progress( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + progress, + ) + + return len(memberships_to_update_rows) + + +def _resolve_stale_data_in_sliding_sync_tables( + txn: LoggingTransaction, +) -> None: + """ + Clears stale/out-of-date entries from the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + + This accounts for when someone downgrades their Synapse version and then upgrades it + again. 
This will ensure that we don't have any stale/out-of-date data in the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables since any new + events sent in rooms would have also needed to be written to the sliding sync + tables. For example a new event needs to bump `event_stream_ordering` in + `sliding_sync_joined_rooms` table or some state in the room changing (like the room + name). Or another example of someone's membership changing in a room affecting + `sliding_sync_membership_snapshots`. + + This way, if a row exists in the sliding sync tables, we are able to rely on it + (accurate data). And if a row doesn't exist, we use a fallback to get the same info + until the background updates fill in the rows or a new event comes in triggering it + to be fully inserted. + + FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + foreground update for + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + https://github.com/element-hq/synapse/issues/17623) + """ + + _resolve_stale_data_in_sliding_sync_joined_rooms_table(txn) + _resolve_stale_data_in_sliding_sync_membership_snapshots_table(txn) + + +def _resolve_stale_data_in_sliding_sync_joined_rooms_table( + txn: LoggingTransaction, +) -> None: + """ + Clears stale/out-of-date entries from the `sliding_sync_joined_rooms` table and + kicks-off the background update to catch-up with what we missed while Synapse was + downgraded. + + See `_resolve_stale_data_in_sliding_sync_tables()` description above for more + context. + """ + + # Find the point when we stopped writing to the `sliding_sync_joined_rooms` table + txn.execute( + """ + SELECT event_stream_ordering + FROM sliding_sync_joined_rooms + ORDER BY event_stream_ordering DESC + LIMIT 1 + """, + ) + + # If we have nothing written to the `sliding_sync_joined_rooms` table, there is + # nothing to clean up + row = cast(Optional[Tuple[int]], txn.fetchone()) + max_stream_ordering_sliding_sync_joined_rooms_table = None + depends_on = None + if row is not None: + (max_stream_ordering_sliding_sync_joined_rooms_table,) = row + + txn.execute( + """ + SELECT room_id + FROM events + WHERE stream_ordering > ? + GROUP BY room_id + ORDER BY MAX(stream_ordering) ASC + """, + (max_stream_ordering_sliding_sync_joined_rooms_table,), + ) + + room_rows = txn.fetchall() + # No new events have been written to the `events` table since the last time we wrote + # to the `sliding_sync_joined_rooms` table so there is nothing to clean up. This is + # the expected normal scenario for people who have not downgraded their Synapse + # version. + if not room_rows: + return + + # 1000 is an arbitrary batch size with no testing + for chunk in batch_iter(room_rows, 1000): + # Handle updating the `sliding_sync_joined_rooms` table + # + # Clear out the stale data + DatabasePool.simple_delete_many_batch_txn( + txn, + table="sliding_sync_joined_rooms", + keys=("room_id",), + values=chunk, + ) + + # Update the `sliding_sync_joined_rooms_to_recalculate` table with the rooms + # that went stale and now need to be recalculated. + DatabasePool.simple_upsert_many_txn_native_upsert( + txn, + table="sliding_sync_joined_rooms_to_recalculate", + key_names=("room_id",), + key_values=chunk, + value_names=(), + # No value columns, therefore make a blank list so that the following + # zip() works correctly. + value_values=[() for x in range(len(chunk))], + ) + else: + # Avoid adding the background updates when there is no data to run them on (if + # the homeserver has no rooms). 
The portdb script refuses to run with pending
+        # background updates and since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+        # Re-run the `sliding_sync_joined_rooms_to_recalculate` prefill if there is
+        # nothing in the `sliding_sync_joined_rooms` table
+        DatabasePool.simple_upsert_txn_native_upsert(
+            txn,
+            table="background_updates",
+            keyvalues={
+                "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+            },
+            values={},
+            # Only insert the row if it doesn't already exist. If it already exists,
+            # we're already working on it
+            insertion_values={
+                "progress_json": "{}",
+            },
+        )
+        depends_on = (
+            _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+        )
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_joined_rooms` table yet. This could happen if someone had zero rooms
+    # on their server (so the normal background update completed), downgraded their
+    # Synapse version, joined and created some new rooms, and upgraded again.
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            # Empty progress is expected since it's not used for this background update.
+            "progress_json": "{}",
+            # Wait for the prefill to finish
+            "depends_on": depends_on,
+        },
+    )
+
+
+def _resolve_stale_data_in_sliding_sync_membership_snapshots_table(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the `sliding_sync_membership_snapshots` table
+    and kicks off the background update to catch up with what we missed while Synapse
+    was downgraded.
+
+    See the `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+    context.
+    """
+
+    # Find the point when we stopped writing to the `sliding_sync_membership_snapshots` table
+    txn.execute(
+        """
+        SELECT event_stream_ordering
+        FROM sliding_sync_membership_snapshots
+        ORDER BY event_stream_ordering DESC
+        LIMIT 1
+        """,
+    )
+
+    # If we have nothing written to the `sliding_sync_membership_snapshots` table,
+    # there is nothing to clean up
+    row = cast(Optional[Tuple[int]], txn.fetchone())
+    max_stream_ordering_sliding_sync_membership_snapshots_table = None
+    if row is not None:
+        (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row
+
+        # XXX: Since `forgotten` is simply a flag on the `room_memberships` table that is
+        # set out-of-band, there is no way to tell whether it was set while Synapse was
+        # downgraded. The only thing the user can do is `/forget` again if they run into
+        # this.
+        #
+        # This only picks up changes to memberships.
+        txn.execute(
+            """
+            SELECT user_id, room_id
+            FROM local_current_membership
+            WHERE event_stream_ordering > ?
+            ORDER BY event_stream_ordering ASC
+            """,
+            (max_stream_ordering_sliding_sync_membership_snapshots_table,),
+        )
+
+        membership_rows = txn.fetchall()
+        # No new membership events have been written since the last time we wrote to
+        # the `sliding_sync_membership_snapshots` table, so there is nothing to clean
+        # up. This is the expected normal scenario for people who have not downgraded
+        # their Synapse version.
+        if not membership_rows:
+            return
+
+        # 1000 is an arbitrary batch size that has not been benchmarked
+        for chunk in batch_iter(membership_rows, 1000):
+            # Handle updating the `sliding_sync_membership_snapshots` table
+            #
+            DatabasePool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_membership_snapshots",
+                keys=("user_id", "room_id"),
+                values=chunk,
+            )
+    else:
+        # Avoid adding the background updates when there is no data to run them on (if
+        # the homeserver has no rooms). The portdb script refuses to run with pending
+        # background updates and since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_membership_snapshots` table yet. This could happen if someone had
+    # zero rooms on their server (so the normal background update completed), downgraded
+    # their Synapse version, joined and created some new rooms, and upgraded again.
+    #
+    progress_json: JsonDict = {}
+    if max_stream_ordering_sliding_sync_membership_snapshots_table is not None:
+        progress_json["initial_phase"] = False
+        progress_json["last_event_stream_ordering"] = (
+            max_stream_ordering_sliding_sync_membership_snapshots_table
+        )
+
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            "progress_json": json_encoder.encode(progress_json),
+        },
+    )
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 4d4877c4c3..6079cc4a52 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -457,6 +457,8 @@ class EventsWorkerStore(SQLBaseStore):
     ) -> Optional[EventBase]:
         """Get an event from the database by event_id.
 
+        Events for unknown room versions will also be filtered out.
+
         Args:
             event_id: The event_id of the event to fetch
 
@@ -511,6 +513,10 @@ class EventsWorkerStore(SQLBaseStore):
     ) -> Dict[str, EventBase]:
         """Get events from the database
 
+        Unknown events will be omitted from the response.
+
+        Events for unknown room versions will also be filtered out.
+
         Args:
             event_ids: The event_ids of the events to fetch
 
@@ -553,6 +559,8 @@ class EventsWorkerStore(SQLBaseStore):
 
         Unknown events will be omitted from the response.
 
+        Events for unknown room versions will also be filtered out.
+ Args: event_ids: The event_ids of the events to fetch diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 3b81ed943c..fc4c286595 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -454,6 +454,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # so must be deleted first. "local_current_membership", "room_memberships", + # Note: the sliding_sync_ tables have foreign keys to the `events` table + # so must be deleted first. + "sliding_sync_joined_rooms", + "sliding_sync_membership_snapshots", "events", "federation_inbound_events_staging", "receipts_graph", diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 71baf57663..722686d4b8 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1337,6 +1337,12 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): keyvalues={"user_id": user_id, "room_id": room_id}, updatevalues={"forgotten": 1}, ) + self.db_pool.simple_update_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={"user_id": user_id, "room_id": room_id}, + updatevalues={"forgotten": 1}, + ) self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) self._invalidate_cache_and_stream( diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index eaa13da368..ba52fff652 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -161,45 +161,80 @@ class StateDeltasStore(SQLBaseStore): self._get_max_stream_id_in_current_state_deltas_txn, ) + def get_current_state_deltas_for_room_txn( + self, + txn: LoggingTransaction, + room_id: str, + *, + from_token: Optional[RoomStreamToken], + to_token: Optional[RoomStreamToken], + ) -> List[StateDelta]: + """ + Get the state deltas between two tokens. + + (> `from_token` and <= `to_token`) + """ + from_clause = "" + from_args = [] + if from_token is not None: + from_clause = "AND ? < stream_id" + from_args = [from_token.stream] + + to_clause = "" + to_args = [] + if to_token is not None: + to_clause = "AND stream_id <= ?" + to_args = [to_token.get_max_stream_pos()] + + sql = f""" + SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id + FROM current_state_delta_stream + WHERE room_id = ? {from_clause} {to_clause} + ORDER BY stream_id ASC + """ + txn.execute(sql, [room_id] + from_args + to_args) + + return [ + StateDelta( + stream_id=row[1], + room_id=room_id, + event_type=row[2], + state_key=row[3], + event_id=row[4], + prev_event_id=row[5], + ) + for row in txn + if _filter_results_by_stream(from_token, to_token, row[0], row[1]) + ] + @trace async def get_current_state_deltas_for_room( - self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken + self, + room_id: str, + *, + from_token: Optional[RoomStreamToken], + to_token: Optional[RoomStreamToken], ) -> List[StateDelta]: - """Get the state deltas between two tokens.""" + """ + Get the state deltas between two tokens. 
-        if not self._curr_state_delta_stream_cache.has_entity_changed(
-            room_id, from_token.stream
+        (> `from_token` and <= `to_token`)
+        """
+
+        if (
+            from_token is not None
+            and not self._curr_state_delta_stream_cache.has_entity_changed(
+                room_id, from_token.stream
+            )
         ):
             return []
 
-        def get_current_state_deltas_for_room_txn(
-            txn: LoggingTransaction,
-        ) -> List[StateDelta]:
-            sql = """
-                SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
-                FROM current_state_delta_stream
-                WHERE room_id = ? AND ? < stream_id AND stream_id <= ?
-                ORDER BY stream_id ASC
-            """
-            txn.execute(
-                sql, (room_id, from_token.stream, to_token.get_max_stream_pos())
-            )
-
-            return [
-                StateDelta(
-                    stream_id=row[1],
-                    room_id=room_id,
-                    event_type=row[2],
-                    state_key=row[3],
-                    event_id=row[4],
-                    prev_event_id=row[5],
-                )
-                for row in txn
-                if _filter_results_by_stream(from_token, to_token, row[0], row[1])
-            ]
-
         return await self.db_pool.runInteraction(
-            "get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn
+            "get_current_state_deltas_for_room",
+            self.get_current_state_deltas_for_room_txn,
+            room_id,
+            from_token=from_token,
+            to_token=to_token,
         )
 
     @trace
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index e33a8cfe97..1a59e0b5a8 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -1264,12 +1264,76 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return None
 
+    async def get_last_event_pos_in_room(
+        self,
+        room_id: str,
+        event_types: Optional[StrCollection] = None,
+    ) -> Optional[Tuple[str, PersistedEventPosition]]:
+        """
+        Returns the ID and event position of the last event in a room.
+
+        Based on `get_last_event_pos_in_room_before_stream_ordering(...)`
+
+        Args:
+            room_id
+            event_types: Optional allowlist of event types to filter by
+
+        Returns:
+            The ID of the most recent event and its position, or None if there are no
+            events in the room that match the given event types.
+        """
+
+        def _get_last_event_pos_in_room_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Tuple[str, PersistedEventPosition]]:
+            event_type_clause = ""
+            event_type_args: List[str] = []
+            if event_types is not None and len(event_types) > 0:
+                event_type_clause, event_type_args = make_in_list_sql_clause(
+                    txn.database_engine, "type", event_types
+                )
+                event_type_clause = f"AND {event_type_clause}"
+
+            sql = f"""
+                SELECT event_id, stream_ordering, instance_name
+                FROM events
+                LEFT JOIN rejections USING (event_id)
+                WHERE room_id = ?
+ {event_type_clause} + AND NOT outlier + AND rejections.event_id IS NULL + ORDER BY stream_ordering DESC + LIMIT 1 + """ + + txn.execute( + sql, + [room_id] + event_type_args, + ) + + row = cast(Optional[Tuple[str, int, str]], txn.fetchone()) + if row is not None: + event_id, stream_ordering, instance_name = row + + return event_id, PersistedEventPosition( + # If instance_name is null we default to "master" + instance_name or "master", + stream_ordering, + ) + + return None + + return await self.db_pool.runInteraction( + "get_last_event_pos_in_room", + _get_last_event_pos_in_room_txn, + ) + @trace async def get_last_event_pos_in_room_before_stream_ordering( self, room_id: str, end_token: RoomStreamToken, - event_types: Optional[Collection[str]] = None, + event_types: Optional[StrCollection] = None, ) -> Optional[Tuple[str, PersistedEventPosition]]: """ Returns the ID and event position of the last event in a room at or before a diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 581d00346b..316541d818 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 86 # remember to update the list below when updating +SCHEMA_VERSION = 87 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -142,6 +142,10 @@ Changes in SCHEMA_VERSION = 85 Changes in SCHEMA_VERSION = 86 - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache` + +Changes in SCHEMA_VERSION = 87 + - Add tables to store Sliding Sync data for quick filtering/sorting + (`sliding_sync_joined_rooms`, `sliding_sync_membership_snapshots`) """ diff --git a/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql new file mode 100644 index 0000000000..2f71e541f8 --- /dev/null +++ b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql @@ -0,0 +1,169 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +-- This table is a list/queue used to keep track of which rooms need to be inserted into +-- `sliding_sync_joined_rooms`. We do this to avoid reading from `current_state_events` +-- during the background update to populate `sliding_sync_joined_rooms` which works but +-- it takes a lot of work for the database to grab `DISTINCT` room_ids given how many +-- state events there are for each room. +-- +-- This table is prefilled with every room in the `rooms` table (see the +-- `sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update` background +-- update). This table is also updated whenever we come across stale data so that we can +-- catch-up with all of the new data if Synapse was downgraded (see +-- `_resolve_stale_data_in_sliding_sync_tables`). 
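+--
+-- As a rough sketch (illustrative SQL only; the real logic lives in
+-- `_resolve_stale_data_in_sliding_sync_joined_rooms_table`, which also schedules
+-- the background updates), the stale-data catch-up amounts to:
+--
+--   -- Rooms with events newer than our last write are considered stale:
+--   SELECT room_id FROM events
+--   WHERE stream_ordering > (
+--       SELECT MAX(event_stream_ordering) FROM sliding_sync_joined_rooms
+--   )
+--   GROUP BY room_id;
+--
+-- ...after which those rooms are deleted from `sliding_sync_joined_rooms` and
+-- re-inserted into this table so the background update recalculates them.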
+-- +-- FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +-- foreground update for +-- `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +-- https://github.com/element-hq/synapse/issues/17623) +CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms_to_recalculate( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + PRIMARY KEY (room_id) +); + +-- A table for storing room meta data (current state relevant to sliding sync) that the +-- local server is still participating in (someone local is joined to the room). +-- +-- We store the joined rooms in separate table from `sliding_sync_membership_snapshots` +-- because we need up-to-date information for joined rooms and it can be shared across +-- everyone who is joined. +-- +-- This table is kept in sync with `current_state_events` which means if the server is +-- no longer participating in a room, the row will be deleted. +CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + -- The `stream_ordering` of the most-recent/latest event in the room + event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering), + -- The `stream_ordering` of the last event according to the `bump_event_types` + bump_stamp BIGINT, + -- `m.room.create` -> `content.type` (current state) + -- + -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API + room_type TEXT, + -- `m.room.name` -> `content.name` (current state) + -- + -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API + room_name TEXT, + -- `m.room.encryption` -> `content.algorithm` (current state) + -- + -- Useful for the `is_encrypted` filter in the Sliding Sync API + is_encrypted BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the + -- time of the membership). + -- + -- Useful for the `include_old_rooms` functionality in the Sliding Sync API + tombstone_successor_room_id TEXT, + PRIMARY KEY (room_id) +); + +-- So we can purge rooms easily. +-- +-- The primary key is already `room_id` + +-- So we can sort by `stream_ordering +CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_joined_rooms_event_stream_ordering ON sliding_sync_joined_rooms(event_stream_ordering); + +-- A table for storing a snapshot of room meta data (historical current state relevant +-- for sliding sync) at the time of a local user's membership. Only has rows for the +-- latest membership event for a given local user in a room which matches +-- `local_current_membership` . +-- +-- We store all memberships including joins. This makes it easy to reference this table +-- to find all membership for a given user and shares the same semantics as +-- `local_current_membership`. And we get to avoid some table maintenance; if we only +-- stored non-joins, we would have to delete the row for the user when the user joins +-- the room. Stripped state doesn't include the `m.room.tombstone` event, so we just +-- assume that the room doesn't have a tombstone. +-- +-- For remote invite/knocks where the server is not participating in the room, we will +-- use stripped state events to populate this table. We assume that if any stripped +-- state is given, it will include all possible stripped state events types. For +-- example, if stripped state is given but `m.room.encryption` isn't included, we will +-- assume that the room is not encrypted. 
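+--
+-- For example (illustrative values, not part of the schema): a remote invite
+-- whose stripped state contains only
+--   {"type": "m.room.create", "state_key": "", "content": {"type": "m.space"}}
+--   {"type": "m.room.name", "state_key": "", "content": {"name": "Chess Club"}}
+-- would be snapshotted with `has_known_state` = TRUE, `room_type` = 'm.space',
+-- `room_name` = 'Chess Club', and `is_encrypted` = FALSE (no `m.room.encryption`
+-- event was included, so we assume the room is unencrypted).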
+-- +-- We don't include `bump_stamp` here because we can just use the `stream_ordering` from +-- the membership event itself as the `bump_stamp`. +CREATE TABLE IF NOT EXISTS sliding_sync_membership_snapshots( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + user_id TEXT NOT NULL, + -- Useful to be able to tell leaves from kicks (where the `user_id` is different from the `sender`) + sender TEXT NOT NULL, + membership_event_id TEXT NOT NULL REFERENCES events(event_id), + membership TEXT NOT NULL, + -- This is an integer just to match `room_memberships` and also means we don't need + -- to do any casting. + forgotten INTEGER DEFAULT 0 NOT NULL, + -- `stream_ordering` of the `membership_event_id` + event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering), + -- `instance_name` of the worker that persisted the `membership_event_id`. + -- Useful for crafting `PersistedEventPosition(...)` + event_instance_name TEXT NOT NULL, + -- For remote invites/knocks that don't include any stripped state, we want to be + -- able to distinguish between a room with `None` as valid value for some state and + -- room where the state is completely unknown. Basically, this should be True unless + -- no stripped state was provided for a remote invite/knock (False). + has_known_state BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.create` -> `content.type` (according to the current state at the time of + -- the membership). + -- + -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API + room_type TEXT, + -- `m.room.name` -> `content.name` (according to the current state at the time of + -- the membership). + -- + -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API + room_name TEXT, + -- `m.room.encryption` -> `content.algorithm` (according to the current state at the + -- time of the membership). + -- + -- Useful for the `is_encrypted` filter in the Sliding Sync API + is_encrypted BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the + -- time of the membership). + -- + -- Useful for the `include_old_rooms` functionality in the Sliding Sync API + tombstone_successor_room_id TEXT, + PRIMARY KEY (room_id, user_id) +); + +-- So we can purge rooms easily. +-- +-- Since we're using a multi-column index as the primary key (room_id, user_id), the +-- first index column (room_id) is always usable for searching so we don't need to +-- create a separate index for it. +-- +-- CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_room_id ON sliding_sync_membership_snapshots(room_id); + +-- So we can fetch all rooms for a given user +CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id); +-- So we can sort by `stream_ordering +CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_event_stream_ordering ON sliding_sync_membership_snapshots(event_stream_ordering); + + +-- Add a series of background updates to populate the new `sliding_sync_joined_rooms` table: +-- +-- 1. Add a background update to prefill `sliding_sync_joined_rooms_to_recalculate`. +-- We do a one-shot bulk insert from the `rooms` table to prefill. +-- 2. Add a background update to populate the new `sliding_sync_joined_rooms` table +-- based on the rooms listed in the `sliding_sync_joined_rooms_to_recalculate` +-- table. 
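+--
+-- Taken together (illustrative query only), these tables let sliding sync answer
+-- its core questions with cheap indexed scans, e.g. a user's current room list:
+--
+--   SELECT room_id, membership FROM sliding_sync_membership_snapshots
+--   WHERE user_id = '@alice:example.com' AND forgotten = 0;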
+--
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update', '{}');
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+  (8701, 'sliding_sync_joined_rooms_bg_update', '{}', 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update');
+
+-- Add a background update to populate the new `sliding_sync_membership_snapshots` table
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'sliding_sync_membership_snapshots_bg_update', '{}');
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
index b303bb1f96..126f03dd90 100644
--- a/synapse/types/handlers/__init__.py
+++ b/synapse/types/handlers/__init__.py
@@ -30,6 +30,7 @@ if TYPE_CHECKING or HAS_PYDANTIC_V2:
 else:
     from pydantic import Extra
 
+from synapse.api.constants import EventTypes
 from synapse.events import EventBase
 from synapse.types import (
     DeviceListUpdates,
@@ -45,6 +46,18 @@ from synapse.types.rest.client import SlidingSyncBody
 if TYPE_CHECKING:
     from synapse.handlers.relations import BundledAggregations
 
+# Sliding Sync: The event types that clients should consider as new activity and that
+# affect the `bump_stamp`
+SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES = {
+    EventTypes.Create,
+    EventTypes.Message,
+    EventTypes.Encrypted,
+    EventTypes.Sticker,
+    EventTypes.CallInvite,
+    EventTypes.PollStart,
+    EventTypes.LiveLocationShareStart,
+}
+
 
 class ShutdownRoomParams(TypedDict):
     """
diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
index 04f11c0524..690912133a 100644
--- a/tests/rest/client/sliding_sync/test_rooms_meta.py
+++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -16,7 +16,7 @@ import logging
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
@@ -44,6 +44,10 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.storage_controllers = hs.get_storage_controllers()
+        self.state_handler = self.hs.get_state_handler()
+        persistence = self.hs.get_storage_controllers().persistence
+        assert persistence is not None
+        self.persistence = persistence
 
     def test_rooms_meta_when_joined(self) -> None:
         """
@@ -600,16 +604,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
         Test that `bump_stamp` ignores backfilled events, i.e. events with a negative
         stream ordering.
""" - user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") # Create a remote room creator = "@user:other" room_id = "!foo:other" + room_version = RoomVersions.V10 shared_kwargs = { "room_id": room_id, - "room_version": "10", + "room_version": room_version.identifier, } create_tuple = self.get_success( @@ -618,6 +622,12 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): prev_event_ids=[], type=EventTypes.Create, state_key="", + content={ + # The `ROOM_CREATOR` field could be removed if we used a room + # version > 10 (in favor of relying on `sender`) + EventContentFields.ROOM_CREATOR: creator, + EventContentFields.ROOM_VERSION: room_version.identifier, + }, sender=creator, **shared_kwargs, ) @@ -667,22 +677,29 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ] # Ensure the local HS knows the room version - self.get_success( - self.store.store_room(room_id, creator, False, RoomVersions.V10) - ) + self.get_success(self.store.store_room(room_id, creator, False, room_version)) # Persist these events as backfilled events. - persistence = self.hs.get_storage_controllers().persistence - assert persistence is not None - for event, context in remote_events_and_contexts: - self.get_success(persistence.persist_event(event, context, backfilled=True)) + self.get_success( + self.persistence.persist_event(event, context, backfilled=True) + ) - # Now we join the local user to the room - join_tuple = self.get_success( + # Now we join the local user to the room. We want to make this feel as close to + # the real `process_remote_join()` as possible but we'd like to avoid some of + # the auth checks that would be done in the real code. + # + # FIXME: The test was originally written using this less-real + # `persist_event(...)` shortcut but it would be nice to use the real remote join + # process in a `FederatingHomeserverTestCase`. + flawed_join_tuple = self.get_success( create_event( self.hs, prev_event_ids=[invite_tuple[0].event_id], + # This doesn't work correctly to create an `EventContext` that includes + # both of these state events. I assume it's because we're working on our + # local homeserver which has the remote state set as `outlier`. We have + # to create our own EventContext below to get this right. auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], type=EventTypes.Member, state_key=user1_id, @@ -691,7 +708,22 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): **shared_kwargs, ) ) - self.get_success(persistence.persist_event(*join_tuple)) + # We have to create our own context to get the state set correctly. If we use + # the `EventContext` from the `flawed_join_tuple`, the `current_state_events` + # table will only have the join event in it which should never happen in our + # real server. 
+ join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id + for e in [create_tuple[0], invite_tuple[0]] + }, + partial_state=False, + ) + ) + self.get_success(self.persistence.persist_event(join_event, join_context)) # Doing an SS request should return a positive `bump_stamp`, even though # the only event that matches the bump types has as negative stream diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 506d981ce6..49dc973a36 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -112,6 +112,24 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase): {(1, "user1", "hello"), (2, "user2", "bleb")}, ) + self.get_success( + self.storage.db_pool.runInteraction( + "test", + self.storage.db_pool.simple_upsert_many_txn, + self.table_name, + key_names=key_names, + key_values=[[2, "user2"]], + value_names=[], + value_values=[], + ) + ) + + # Check results are what we expect + self.assertEqual( + set(self._dump_table_to_tuple()), + {(1, "user1", "hello"), (2, "user2", "bleb")}, + ) + def test_simple_update_many(self) -> None: """ simple_update_many performs many updates at once. diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 0a7c4c9421..cb3d8e19bc 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -19,6 +19,7 @@ # # +import logging from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor @@ -35,6 +36,8 @@ from synapse.util import Clock from tests.unittest import HomeserverTestCase +logger = logging.getLogger(__name__) + class ExtremPruneTestCase(HomeserverTestCase): servlets = [ diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 418b556108..330fea0e62 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -24,7 +24,7 @@ from typing import List, Optional, Tuple, cast from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.rest import admin from synapse.rest.admin import register_servlets_for_client_rest_resource @@ -38,6 +38,7 @@ from synapse.util import Clock from tests import unittest from tests.server import TestHomeServer from tests.test_utils import event_injection +from tests.test_utils.event_injection import create_event from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -54,6 +55,10 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): # We can't test the RoomMemberStore on its own without the other event # storage logic self.store = hs.get_datastores().main + self.state_handler = self.hs.get_state_handler() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence self.u_alice = self.register_user("alice", "pass") self.t_alice = self.login("alice", "pass") @@ -220,31 +225,166 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ) def test_join_locally_forgotten_room(self) -> None: - """Tests if a user joins a forgotten room the room is not forgotten anymore.""" - self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) - self.assertFalse( - self.get_success(self.store.is_locally_forgotten_room(self.room)) - 
)
+        """
+        Tests that if a user joins a forgotten room, the room is not forgotten anymore.
 
-        # after leaving and forget the room, it is forgotten
-        self.get_success(
-            event_injection.inject_member_event(
-                self.hs, self.room, self.u_alice, "leave"
+        Since a room can't be re-joined once everyone has left, this can only happen
+        with a room that has remote users in it.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote room
+        creator = "@user:other"
+        room_id = "!foo:other"
+        room_version = RoomVersions.V10
+        shared_kwargs = {
+            "room_id": room_id,
+            "room_version": room_version.identifier,
+        }
+
+        create_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[],
+                type=EventTypes.Create,
+                state_key="",
+                content={
+                    # The `ROOM_CREATOR` field could be removed if we used a room
+                    # version > 10 (in favor of relying on `sender`)
+                    EventContentFields.ROOM_CREATOR: creator,
+                    EventContentFields.ROOM_VERSION: room_version.identifier,
+                },
+                sender=creator,
+                **shared_kwargs,
             )
         )
-        self.get_success(self.store.forget(self.u_alice, self.room))
-        self.assertTrue(
-            self.get_success(self.store.is_locally_forgotten_room(self.room))
-        )
-
-        # after rejoin the room is not forgotten anymore
-        self.get_success(
-            event_injection.inject_member_event(
-                self.hs, self.room, self.u_alice, "join"
+        creator_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[create_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=creator,
+                content={"membership": Membership.JOIN},
+                sender=creator,
+                **shared_kwargs,
             )
         )
+
+        remote_events_and_contexts = [
+            create_tuple,
+            creator_tuple,
+        ]
+
+        # Ensure the local HS knows the room version
+        self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+        # Persist these events as backfilled events.
+        for event, context in remote_events_and_contexts:
+            self.get_success(
+                self.persistence.persist_event(event, context, backfilled=True)
+            )
+
+        # Now we join the local user to the room. We want to make this feel as close to
+        # the real `process_remote_join()` as possible but we'd like to avoid some of
+        # the auth checks that would be done in the real code.
+        #
+        # FIXME: The test was originally written using this less-real
+        # `persist_event(...)` shortcut but it would be nice to use the real remote join
+        # process in a `FederatingHomeserverTestCase`.
+        flawed_join_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[creator_tuple[0].event_id],
+                # This doesn't work correctly to create an `EventContext` that includes
+                # both of these state events. I assume it's because we're working on our
+                # local homeserver which has the remote state set as `outlier`. We have
+                # to create our own EventContext below to get this right.
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.JOIN},
+                sender=user1_id,
+                **shared_kwargs,
+            )
+        )
+        # We have to create our own context to get the state set correctly. If we use
+        # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+        # table will only have the join event in it which should never happen in our
+        # real server.
+ join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id for e in [create_tuple[0]] + }, + partial_state=False, + ) + ) + self.get_success(self.persistence.persist_event(join_event, join_context)) + + # The room shouldn't be forgotten because the local user just joined self.assertFalse( - self.get_success(self.store.is_locally_forgotten_room(self.room)) + self.get_success(self.store.is_locally_forgotten_room(room_id)) + ) + + # After all of the local users (there is only user1) leave and forgetting the + # room, it is forgotten + user1_leave_response = self.helper.leave(room_id, user1_id, tok=user1_tok) + user1_leave_event = self.get_success( + self.store.get_event(user1_leave_response["event_id"]) + ) + self.get_success(self.store.forget(user1_id, room_id)) + self.assertTrue(self.get_success(self.store.is_locally_forgotten_room(room_id))) + + # Join the local user to the room (again). We want to make this feel as close to + # the real `process_remote_join()` as possible but we'd like to avoid some of + # the auth checks that would be done in the real code. + # + # FIXME: The test was originally written using this less-real + # `event_injection.inject_member_event(...)` shortcut but it would be nice to + # use the real remote join process in a `FederatingHomeserverTestCase`. + flawed_join_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[user1_leave_response["event_id"]], + # This doesn't work correctly to create an `EventContext` that includes + # both of these state events. I assume it's because we're working on our + # local homeserver which has the remote state set as `outlier`. We have + # to create our own EventContext below to get this right. + auth_event_ids=[ + create_tuple[0].event_id, + user1_leave_response["event_id"], + ], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.JOIN}, + sender=user1_id, + **shared_kwargs, + ) + ) + # We have to create our own context to get the state set correctly. If we use + # the `EventContext` from the `flawed_join_tuple`, the `current_state_events` + # table will only have the join event in it which should never happen in our + # real server. + join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id + for e in [create_tuple[0], user1_leave_event] + }, + partial_state=False, + ) + ) + self.get_success(self.persistence.persist_event(join_event, join_context)) + + # After the local user rejoins the remote room, it isn't forgotten anymore + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(room_id)) ) diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py new file mode 100644 index 0000000000..4be098b6f6 --- /dev/null +++ b/tests/storage/test_sliding_sync_tables.py @@ -0,0 +1,4830 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+#
+# Originally licensed under the Apache License, Version 2.0:
+# .
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from typing import Dict, List, Optional, Tuple, cast
+
+import attr
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
+from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main.events import DeltaState
+from synapse.storage.databases.main.events_bg_updates import (
+    _BackgroundUpdates,
+    _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+    _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+)
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncJoinedRoomResult:
+    room_id: str
+    # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+    # `event.internal_metadata.stream_ordering` is marked optional because it only
+    # exists for persisted events, but these tests only work with persisted events and
+    # make direct comparisons, so any mismatch will be caught.
+    event_stream_ordering: Optional[int]
+    bump_stamp: Optional[int]
+    room_type: Optional[str]
+    room_name: Optional[str]
+    is_encrypted: bool
+    tombstone_successor_room_id: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncMembershipSnapshotResult:
+    room_id: str
+    user_id: str
+    sender: str
+    membership_event_id: str
+    membership: str
+    # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+    # `event.internal_metadata.stream_ordering` is marked optional because it only
+    # exists for persisted events, but these tests only work with persisted events and
+    # make direct comparisons, so any mismatch will be caught.
+    event_stream_ordering: Optional[int]
+    has_known_state: bool
+    room_type: Optional[str]
+    room_name: Optional[str]
+    is_encrypted: bool
+    tombstone_successor_room_id: Optional[str]
+    # Make this default to "not forgotten" because it doesn't apply to many tests and we
+    # don't want to force all of the tests to deal with it.
+    forgotten: bool = False
+
+
+class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
+    """
+    Helpers to deal with testing that the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+    populated correctly.
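+
+    Use `_get_sliding_sync_joined_rooms()` and
+    `_get_sliding_sync_membership_snapshots()` (defined below) to read the tables
+    back as attrs result objects for easy comparison in assertions.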
+ """ + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + persist_events_store = self.hs.get_datastores().persist_events + assert persist_events_store is not None + self.persist_events_store = persist_events_store + + def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]: + """ + Return the rows from the `sliding_sync_joined_rooms` table. + + Returns: + Mapping from room_id to _SlidingSyncJoinedRoomResult. + """ + rows = cast( + List[Tuple[str, int, int, str, str, bool, str]], + self.get_success( + self.store.db_pool.simple_select_list( + "sliding_sync_joined_rooms", + None, + retcols=( + "room_id", + "event_stream_ordering", + "bump_stamp", + "room_type", + "room_name", + "is_encrypted", + "tombstone_successor_room_id", + ), + ), + ), + ) + + return { + row[0]: _SlidingSyncJoinedRoomResult( + room_id=row[0], + event_stream_ordering=row[1], + bump_stamp=row[2], + room_type=row[3], + room_name=row[4], + is_encrypted=bool(row[5]), + tombstone_successor_room_id=row[6], + ) + for row in rows + } + + def _get_sliding_sync_membership_snapshots( + self, + ) -> Dict[Tuple[str, str], _SlidingSyncMembershipSnapshotResult]: + """ + Return the rows from the `sliding_sync_membership_snapshots` table. + + Returns: + Mapping from the (room_id, user_id) to _SlidingSyncMembershipSnapshotResult. + """ + rows = cast( + List[Tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]], + self.get_success( + self.store.db_pool.simple_select_list( + "sliding_sync_membership_snapshots", + None, + retcols=( + "room_id", + "user_id", + "sender", + "membership_event_id", + "membership", + "forgotten", + "event_stream_ordering", + "has_known_state", + "room_type", + "room_name", + "is_encrypted", + "tombstone_successor_room_id", + ), + ), + ), + ) + + return { + (row[0], row[1]): _SlidingSyncMembershipSnapshotResult( + room_id=row[0], + user_id=row[1], + sender=row[2], + membership_event_id=row[3], + membership=row[4], + forgotten=bool(row[5]), + event_stream_ordering=row[6], + has_known_state=bool(row[7]), + room_type=row[8], + room_name=row[9], + is_encrypted=bool(row[10]), + tombstone_successor_room_id=row[11], + ) + for row in rows + } + + _remote_invite_count: int = 0 + + def _create_remote_invite_room_for_user( + self, + invitee_user_id: str, + unsigned_invite_room_state: Optional[List[StrippedStateEvent]], + ) -> Tuple[str, EventBase]: + """ + Create a fake invite for a remote room and persist it. + + We don't have any state for these kind of rooms and can only rely on the + stripped state included in the unsigned portion of the invite event to identify + the room. + + Args: + invitee_user_id: The person being invited + unsigned_invite_room_state: List of stripped state events to assist the + receiver in identifying the room. + + Returns: + The room ID of the remote invite room and the persisted remote invite event. 
+ """ + invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" + + invite_event_dict = { + "room_id": invite_room_id, + "sender": "@inviter:remote_server", + "state_key": invitee_user_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + } + if unsigned_invite_room_state is not None: + serialized_stripped_state_events = [] + for stripped_event in unsigned_invite_room_state: + serialized_stripped_state_events.append( + { + "type": stripped_event.type, + "state_key": stripped_event.state_key, + "sender": stripped_event.sender, + "content": stripped_event.content, + } + ) + + invite_event_dict["unsigned"] = { + "invite_room_state": serialized_stripped_state_events + } + + invite_event = make_event_from_dict( + invite_event_dict, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + self.store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + persisted_event, _, _ = self.get_success( + persist_controller.persist_event(invite_event, context) + ) + + self._remote_invite_count += 1 + + return invite_room_id, persisted_event + + def _retract_remote_invite_for_user( + self, user_id: str, remote_room_id: str, invite_event_id: str + ) -> EventBase: + """ + Create a fake invite retraction for a remote room and persist it. + + Retracting an invite just means the person is no longer invited to the room. + This is done by someone with proper power levels kicking the user from the room. + A kick shows up as a leave event for a given person with a different `sender`. + + Args: + user_id: The person who was invited and we're going to retract the + invite for. + remote_room_id: The room ID that the invite was for. + invite_event_id: The event ID of the invite + + Returns: + The persisted leave (kick) event. + """ + + kick_event_dict = { + "room_id": remote_room_id, + "sender": "@inviter:remote_server", + "state_key": user_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.LEAVE}, + "auth_events": [invite_event_id], + "prev_events": [], + } + + kick_event = make_event_from_dict( + kick_event_dict, + room_version=RoomVersions.V10, + ) + kick_event.internal_metadata.outlier = True + kick_event.internal_metadata.out_of_band_membership = True + + self.get_success( + self.store.maybe_store_room_on_outlier_membership( + room_id=remote_room_id, room_version=kick_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + persisted_event, _, _ = self.get_success( + persist_controller.persist_event(kick_event, context) + ) + + return persisted_event + + +class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): + """ + Tests to make sure the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are + populated and updated correctly as new events are sent. 
+ """ + + def test_joined_room_with_no_info(self) -> None: + """ + Test joined room that doesn't have a room type, encryption, or name shows up in + `sliding_sync_joined_rooms`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # History visibility just happens to be the last event sent in the room + event_stream_ordering=state_map[ + (EventTypes.RoomHistoryVisibility, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_with_info(self) -> None: + """ + Test joined encrypted room with name shows up in `sliding_sync_joined_rooms`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id1, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id1, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + # User1 joins the room + self.helper.join(room_id1, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + # Even though this room does have a name, is encrypted, and has a + # tombstone, user2 is the room creator and joined at the room creation + # time which didn't have this state set yet. + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_space_room_with_info(self) -> None: + """ + Test joined space room with name shows up in `sliding_sync_joined_rooms`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # User1 joins the room + user1_join_response = self.helper.join(space_room_id, user1_id, tok=user1_tok) + user1_join_event_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {space_room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[space_room_id], + _SlidingSyncJoinedRoomResult( + room_id=space_room_id, + event_stream_ordering=user1_join_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (space_room_id, user1_id), + (space_room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + # Even though this room does have a name, user2 is the room creator and + # joined at the room creation time which didn't have this state set yet. + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_with_state_updated(self) -> None: + """ + Test state derived info in `sliding_sync_joined_rooms` is updated when the + current state is updated. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_join_event_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=user1_join_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + + # Update the room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room was renamed"}, + tok=user2_tok, + ) + # Encrypt the room + encrypt_room_response = self.helper.send_state( + room_id1, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + encrypt_room_event_pos = self.get_success( + self.store.get_position_for_event(encrypt_room_response["event_id"]) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + # Make sure we see the new room name + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=encrypt_room_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room was renamed", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user 
joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_is_bumped(self) -> None: + """ + Test that `event_stream_ordering` and `bump_stamp` is updated when a new bump + event is sent (`sliding_sync_joined_rooms`). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_join_event_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=user1_join_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + # Send a new message to 
bump the room
+ event_response = self.helper.send(room_id1, "some message", tok=user1_tok)
+ event_pos = self.get_success(
+ self.store.get_position_for_event(event_response["event_id"])
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ # Make sure the room was bumped
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Updated `event_stream_ordering`
+ event_stream_ordering=event_pos.stream,
+ # And since the event was a bump event, the `bump_stamp` should be updated
+ bump_stamp=event_pos.stream,
+ # The state is still the same (it didn't change)
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ user1_snapshot,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_joined_room_meta_state_reset(self) -> None:
+ """
+ Test that a state reset on the room name is reflected in the
+ `sliding_sync_joined_rooms` table.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Make sure we see the new room name
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ 
sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined (no room + # name when the room creator joined) + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + # Mock a state reset removing the room name state from the current state + message_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[state_map[(EventTypes.Name, "")].event_id], + auth_event_ids=[ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.Member, user1_id)].event_id, + ], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=user1_id, + room_id=room_id, + room_version=RoomVersions.V10.identifier, + ) + ) + event_chunk = [message_tuple] + self.get_success( + self.persist_events_store._persist_events_and_state_updates( + room_id, + event_chunk, + state_delta_for_room=DeltaState( + # This is the state reset part. We're removing the room name state. + to_delete=[(EventTypes.Name, "")], + to_insert={}, + ), + new_forward_extremities={message_tuple[0].event_id}, + use_negative_stream_ordering=False, + inhibit_local_membership_updates=False, + new_event_links={}, + ) + ) + + # Make sure the state reset is reflected in the `sliding_sync_joined_rooms` table + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=message_tuple[ + 0 + ].internal_metadata.stream_ordering, + bump_stamp=message_tuple[0].internal_metadata.stream_ordering, + room_type=None, + # This was state reset back to None + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # State reset shouldn't be reflected in the `sliding_sync_membership_snapshots` + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + # Snapshots haven't changed + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + def test_joined_room_fully_insert_on_state_update(self) -> None: + """ + Test that when an existing room updates it's state and we don't have a + corresponding row in `sliding_sync_joined_rooms` yet, we fully-insert the row + even though only a tiny piece of state changed. 
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate an existing room (before we even added
+ # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table
+ # yet.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Encrypt the room
+ self.helper.send_state(
+ room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ # The room should now be in the `sliding_sync_joined_rooms` table
+ # (fully-inserted with all of the state values).
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=state_map[
+ (EventTypes.RoomEncryption, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_nothing_if_not_in_table_when_bumped(self) -> None:
+ """
+ Test a new message being sent in an existing room when we don't have a
+ corresponding row in `sliding_sync_joined_rooms` yet; either nothing should
+ happen or we should fully-insert the row. We currently do nothing.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate an existing room (before we even added
+ # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table
+ # yet.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Send a new message to bump the room
+ self.helper.send(room_id, "some message", tok=user1_tok)
+
+ # Either nothing should happen or we should fully-insert the row. We currently
+ # do nothing for non-state events.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ def test_non_join_space_room_with_info(self) -> None:
+ """
+ Test that users who were invited show up in `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ space_room_id = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ # Add a tombstone
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Tombstone,
+ {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+ tok=user2_tok,
+ )
+
+ # User1 is invited to the room
+ user1_invited_response = self.helper.invite(
+ space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invited_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_invited_response["event_id"])
+ )
+
+ # Update the room name after we are invited just to make sure
+ # we don't update non-join memberships when the room name changes.
+ rename_response = self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space was renamed"},
+ tok=user2_tok,
+ )
+ rename_event_pos = self.get_success(
+ self.store.get_position_for_event(rename_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(space_room_id)
+ )
+
+ # User2 is still joined to the room so we should still have an entry in the
+ # `sliding_sync_joined_rooms` table.
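+ # The assertions below check both sides of this: the joined-rooms row should
+ # reflect the renamed space, while user1's invite snapshot (further down)
+ # should still hold the name from invite time.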
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {space_room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[space_room_id], + _SlidingSyncJoinedRoomResult( + room_id=space_room_id, + event_stream_ordering=rename_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=RoomTypes.SPACE, + room_name="my super duper space was renamed", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (space_room_id, user1_id), + (space_room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_invite_ban(self) -> None: + """ + Test users who have invite/ban membership in room shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + user1_invited_response = self.helper.invite( + room_id1, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invited_event_pos = self.get_success( + self.store.get_position_for_event(user1_invited_response["event_id"]) + ) + + # User3 joins the room + self.helper.join(room_id1, user3_id, tok=user3_tok) + # User3 is banned from the room + user3_ban_response = self.helper.ban( + room_id1, src=user2_id, targ=user3_id, tok=user2_tok + ) + user3_ban_event_pos = self.get_success( + self.store.get_position_for_event(user3_ban_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # User2 is still joined to the room so we should still have an entry + # in the `sliding_sync_joined_rooms` table. 
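+ # The ban was the last event sent in the room, so the `event_stream_ordering`
+ # asserted below should point at the ban event.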
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=user3_ban_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + (room_id1, user3_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user was banned + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user3_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user3_id, + sender=user2_id, + membership_event_id=user3_ban_response["event_id"], + membership=Membership.BAN, + event_stream_ordering=user3_ban_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_reject_invite_empty_room(self) -> None: + """ + In a room where no one is joined (`no_longer_in_room`), test rejecting an invite. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # User2 leaves the room + user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok) + user2_leave_event_pos = self.get_success( + self.store.get_position_for_event(user2_leave_response["event_id"]) + ) + + # User1 rejects the invite + user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + user1_leave_event_pos = self.get_success( + self.store.get_position_for_event(user1_leave_response["event_id"]) + ) + + # No one is joined to the room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user left + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user1_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the left + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=user2_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user2_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_changing(self) -> None: + """ + Test latest snapshot evolves when membership changes (`sliding_sync_membership_snapshots`). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + # ====================================================== + user1_invited_response = self.helper.invite( + room_id1, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invited_event_pos = self.get_success( + self.store.get_position_for_event(user1_invited_response["event_id"]) + ) + + # Update the room name after the user was invited + room_name_update_response = self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + room_name_update_event_pos = self.get_success( + self.store.get_position_for_event(room_name_update_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Assert joined room status + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # Latest event in the room + event_stream_ordering=room_name_update_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # Assert membership snapshots + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=None, + # Room name was updated after the user was invited so we should still + # see it unset here + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + # User1 joins the room + # ====================================================== + user1_joined_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_joined_event_pos = self.get_success( + self.store.get_position_for_event(user1_joined_response["event_id"]) + ) + + # Assert joined room status + sliding_sync_joined_rooms_results = 
self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Latest event in the room
+ event_stream_ordering=user1_joined_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # Assert membership snapshots
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_joined_response["event_id"],
+ membership=Membership.JOIN,
+ event_stream_ordering=user1_joined_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ # We see the updated state because the user joined after the room name
+ # change
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ # User1 is banned from the room
+ # ======================================================
+ user1_ban_response = self.helper.ban(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_ban_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_ban_response["event_id"])
+ )
+
+ # Assert joined room status
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Latest event in the room
+ event_stream_ordering=user1_ban_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # Assert membership snapshots
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user was banned
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_ban_response["event_id"],
+ membership=Membership.BAN,
+ event_stream_ordering=user1_ban_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ # We see the updated state because the user was banned after the room
+ # name change
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state 
when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + def test_non_join_server_left_room(self) -> None: + """ + Test everyone local leaves the room but their leave membership still shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # User2 leaves the room + user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok) + user2_leave_event_pos = self.get_success( + self.store.get_position_for_event(user2_leave_response["event_id"]) + ) + + # User1 leaves the room + user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + user1_leave_event_pos = self.get_success( + self.store.get_position_for_event(user1_leave_response["event_id"]) + ) + + # No one is joined to the room anymore so we shouldn't have an entry in the + # `sliding_sync_joined_rooms` table. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # We should still see rows for the leave events (non-joins) + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user1_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=user2_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user2_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + @parameterized.expand( + [ + # No stripped state provided + ("none", None), + # Empty stripped state provided + ("empty", []), + ] + ) + def test_non_join_remote_invite_no_stripped_state( + self, _description: str, stripped_state: Optional[List[StrippedStateEvent]] + ) -> None: + """ + Test remote invite with no stripped state provided shows up in + `sliding_sync_membership_snapshots` with `has_known_state=False`. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, stripped_state) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + # No stripped state provided + has_known_state=False, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_remote_invite_unencrypted_room(self) -> None: + """ + Test remote invite with stripped state (unencrypted room) shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + ], + ) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_remote_invite_encrypted_room(self) -> None: + """ + Test remote invite with stripped state (encrypted room) shows up in + `sliding_sync_membership_snapshots`. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + # This is not one of the stripped state events according to the state + # but we still handle it. + StrippedStateEvent( + type=EventTypes.Tombstone, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room", + }, + ), + # Also test a random event that we don't care about + StrippedStateEvent( + type="org.matrix.foo_state", + state_key="", + sender="@inviter:remote_server", + content={ + "foo": "qux", + }, + ), + ], + ) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + def test_non_join_remote_invite_space_room(self) -> None: + """ + Test remote invite with stripped state (encrypted space room with name) shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. 
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ # Specify that it is a space room
+ EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.Name,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper space",
+ },
+ ),
+ ],
+ )
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_event.event_id,
+ membership=Membership.INVITE,
+ event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_reject_remote_invite(self) -> None:
+ """
+ Test that a rejected remote invite (the user decided to leave the room) inherits
+ metadata from the remote invite's stripped state and shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
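+ # When user1 later rejects the invite, the resulting leave snapshot should
+ # still inherit `is_encrypted=True` from this stripped state.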
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ ],
+ )
+ )
+
+ # User1 decides to leave the room (reject the invite)
+ user1_leave_response = self.helper.leave(
+ remote_invite_room_id, user1_id, tok=user1_tok
+ )
+ user1_leave_pos = self.get_success(
+ self.store.get_position_for_event(user1_leave_response["event_id"])
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user1_leave_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_retracted_remote_invite(self) -> None:
+ """
+ Test that a retracted remote invite (the remote inviter kicks the invited user)
+ inherits metadata from the remote invite's stripped state and shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ ],
+ )
+ )
+
+ # `@inviter:remote_server` decides to retract the invite (kicks the user).
+ # (Note: A kick is just a leave event with a different sender) + remote_invite_retraction_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=remote_invite_room_id, + invite_event_id=remote_invite_event.event_id, + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_retraction_event.event_id, + membership=Membership.LEAVE, + event_stream_ordering=remote_invite_retraction_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_state_reset(self) -> None: + """ + Test a state reset that removes someone from the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Make sure we see the new room name + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined 
(no room
+ # name when the room creator joined)
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ # Mock a state reset removing the membership for user1 in the current state
+ message_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[state_map[(EventTypes.Name, "")].event_id],
+ auth_event_ids=[
+ state_map[(EventTypes.Create, "")].event_id,
+ state_map[(EventTypes.Member, user1_id)].event_id,
+ ],
+ type=EventTypes.Message,
+ content={"body": "foo", "msgtype": "m.text"},
+ sender=user1_id,
+ room_id=room_id,
+ room_version=RoomVersions.V10.identifier,
+ )
+ )
+ event_chunk = [message_tuple]
+ self.get_success(
+ self.persist_events_store._persist_events_and_state_updates(
+ room_id,
+ event_chunk,
+ state_delta_for_room=DeltaState(
+ # This is the state reset part. We're removing user1's membership.
+ to_delete=[(EventTypes.Member, user1_id)],
+ to_insert={},
+ ),
+ new_forward_extremities={message_tuple[0].event_id},
+ use_negative_stream_ordering=False,
+ inhibit_local_membership_updates=False,
+ new_event_links={},
+ )
+ )
+
+ # State reset on membership doesn't affect the `sliding_sync_joined_rooms` table
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=message_tuple[
+ 0
+ ].internal_metadata.stream_ordering,
+ bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # State reset on membership should remove the user's snapshot
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # We shouldn't see user1 in the snapshots table anymore
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Snapshot for user2 hasn't changed
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_membership_snapshot_forget(self) -> None:
+ """
+ Test that forgetting a room updates `sliding_sync_membership_snapshots`
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ # User1 leaves the room (we have to leave in order to forget the room)
+ self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+ state_map = self.get_success(
+ 
self.storage_controllers.state.get_current_state(room_id) + ) + + # Check on the `sliding_sync_membership_snapshots` table (nothing should be + # forgotten yet) + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.LEAVE, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + # Room is not forgotten + forgotten=False, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + # Forget the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Check on the `sliding_sync_membership_snapshots` table + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + # Room is now forgotten for user1 + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + attr.evolve(user1_snapshot, forgotten=True), + ) + # Nothing changed for user2 + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + def test_membership_snapshot_missing_forget( + self, + ) -> None: + """ + Test forgetting a room with no existing row in `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + # User1 leaves the room (we have to leave in order to forget the room) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. 
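+ # (Nothing in the real code path deletes rows like this; the
+ # `simple_delete_many` call below is purely test set-up to simulate snapshot
+ # rows that were never written, so we can check that the `/forget` code path
+ # tolerates a missing row.)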
+
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshot_missing_forget",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Forget the room
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # It doesn't explode
+
+ # We still shouldn't find anything in the table because nothing has re-created them
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+
+class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Test the background updates that populate the `sliding_sync_joined_rooms` and
+ `sliding_sync_membership_snapshots` tables.
+ """
+
+ def test_joined_background_update_missing(self) -> None:
+ """
+ Test that the background update for `sliding_sync_joined_rooms` populates missing rows
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create rooms with various levels of state that should appear in the table
+ #
+ room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ space_room_id = self.helper.create_room_as(
+ user1_id,
+ tok=user1_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user1_tok,
+ )
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_joined_rooms",
+ column="room_id",
+ iterable=(room_id_no_info, room_id_with_info, space_room_id),
+ keyvalues={},
+ desc="sliding_sync_joined_rooms.test_joined_background_update_missing",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Insert and run the background updates.
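+ # (Roughly how this works: the background updater polls the
+ # `background_updates` table, so inserting rows by hand with an
+ # `update_name` and empty `progress_json` is enough to schedule them. The
+ # `depends_on` column below makes the main update wait for the prefill
+ # update, and resetting `_all_done` forces the updater to re-check the
+ # table.)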
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+ "progress_json": "{}",
+ "depends_on": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id_no_info, room_id_with_info, space_room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id_no_info)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id_no_info],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id_no_info,
+ # History visibility just happens to be the last event sent in the room
+ event_stream_ordering=state_map[
+ (EventTypes.RoomHistoryVisibility, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id_with_info)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id_with_info],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id_with_info,
+ # Latest event sent in the room
+ event_stream_ordering=state_map[
+ (EventTypes.RoomEncryption, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(space_room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[space_room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=space_room_id,
+ # Latest event sent in the room
+ event_stream_ordering=state_map[
+ (EventTypes.Name, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_membership_snapshots_background_update_joined(self) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+ populates missing rows for join memberships.
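+
+ For joins, each snapshot should mirror the room's state (name, encryption,
+ room type, tombstone) as it stands when the update runs.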
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok) + + room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user1_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user1_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user1_tok, + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_joined", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+ self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + }, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_no_info) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_with_info) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_local_invite(self) -> None: + """ + Test that the background update for `sliding_sync_membership_snapshots` + populates missing rows for invite memberships. 
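+
+ By the time the update runs here, everyone has left the rooms, so the rows
+ can't come from `current_state_events`; they have to be derived from the
+ stripped state on the invite events themselves (see the comments below).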
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok) + + room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # Invite user1 to the rooms + user1_invite_room_id_no_info_response = self.helper.invite( + room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invite_room_id_with_info_response = self.helper.invite( + room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invite_space_room_id_response = self.helper.invite( + space_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + + # Have user2 leave the rooms to make sure that our background update is not just + # reading from `current_state_events`. For invite/knock memberships, we should + # be reading from the stripped state on the invite/knock event itself. + self.helper.leave(room_id_no_info, user2_id, tok=user2_tok) + self.helper.leave(room_id_with_info, user2_id, tok=user2_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + # Check to make sure we actually don't have any `current_state_events` for the rooms + current_state_check_rows = self.get_success( + self.store.db_pool.simple_select_many_batch( + table="current_state_events", + column="room_id", + iterable=[room_id_no_info, room_id_with_info, space_room_id], + retcols=("event_id",), + keyvalues={}, + desc="check current_state_events in test", + ) + ) + self.assertEqual(len(current_state_check_rows), 0) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_local_invite", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # The invite memberships for user1
+ (room_id_no_info, user1_id),
+ (room_id_with_info, user1_id),
+ (space_room_id, user1_id),
+ # The leave memberships for user2
+ (room_id_no_info, user2_id),
+ (room_id_with_info, user2_id),
+ (space_room_id, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_no_info,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_room_id_no_info_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_room_id_no_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_with_info, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_with_info,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_room_id_with_info_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_room_id_with_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ # The tombstone isn't shown here ("another_room") because it's not one
+ # of the stripped events that we hand out as part of the invite event.
+ # Even though we handle this scenario from other remote homeservers,
+ # Synapse does not include the tombstone in the invite event.
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_space_room_id_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_space_room_id_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_membership_snapshots_background_update_remote_invite(
+ self,
+ ) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+ populates missing rows for remote invites (out-of-band memberships).
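+
+ Remote invites never had local room state to begin with, so the update can
+ only use the stripped state bundled with the invite event; rows for invites
+ that carried no stripped state at all are marked `has_known_state=False`.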
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_unknown_state, room_id_unknown_state_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, None) + ) + + room_id_no_info, room_id_no_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + ], + ) + ) + + room_id_with_info, room_id_with_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + space_room_id, space_room_id_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper space", + }, + ), + ], + ) + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=( + room_id_unknown_state, + room_id_no_info, + room_id_with_info, + space_room_id, + ), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+ self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + # The invite memberships for user1 + (room_id_unknown_state, user1_id), + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_unknown_state, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_unknown_state, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_unknown_state_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_unknown_state_invite_event.internal_metadata.stream_ordering, + has_known_state=False, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_no_info_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_no_info_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_with_info_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_with_info_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=space_room_id_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=space_room_id_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_remote_invite_rejections_and_retractions( + self, + ) -> None: + """ + Test that the background update for `sliding_sync_membership_snapshots` + populates missing rows for remote invite rejections/retractions (out-of-band memberships). 
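+
+ A retraction here is an out-of-band leave sent on the user's behalf by the
+ remote server, so (like the invite itself) it comes with no local room
+ state beyond what the original invite carried.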
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_unknown_state, room_id_unknown_state_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, None) + ) + + room_id_no_info, room_id_no_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + ], + ) + ) + + room_id_with_info, room_id_with_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + space_room_id, space_room_id_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper space", + }, + ), + ], + ) + ) + + # Reject the remote invites. + # Also try retracting a remote invite. + room_id_unknown_state_leave_event_response = self.helper.leave( + room_id_unknown_state, user1_id, tok=user1_tok + ) + room_id_no_info_leave_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=room_id_no_info, + invite_event_id=room_id_no_info_invite_event.event_id, + ) + room_id_with_info_leave_event_response = self.helper.leave( + room_id_with_info, user1_id, tok=user1_tok + ) + space_room_id_leave_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=space_room_id, + invite_event_id=space_room_id_invite_event.event_id, + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=( + room_id_unknown_state, + room_id_no_info, + room_id_with_info, + space_room_id, + ), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite_rejections_and_retractions", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # The leave memberships for user1 (rejections/retractions)
+ (room_id_unknown_state, user1_id),
+ (room_id_no_info, user1_id),
+ (room_id_with_info, user1_id),
+ (space_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_unknown_state, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_unknown_state,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=room_id_unknown_state_leave_event_response[
+ "event_id"
+ ],
+ membership=Membership.LEAVE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ room_id_unknown_state_leave_event_response["event_id"]
+ )
+ ).stream,
+ has_known_state=False,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_no_info,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=room_id_no_info_leave_event.event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=room_id_no_info_leave_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_with_info, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_with_info,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=room_id_with_info_leave_event_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ room_id_with_info_leave_event_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=space_room_id_leave_event.event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=space_room_id_leave_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ @parameterized.expand(
+ [
+ # We'll do a kick for this
+ (Membership.LEAVE,),
+ (Membership.BAN,),
+ ]
+ )
+ def test_membership_snapshots_background_update_historical_state(
+ self, test_membership: str
+ ) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+ populates missing rows for kick/ban memberships, reading from the
+ historical state.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok) + + room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # Join the room in preparation for our test_membership + self.helper.join(room_id_no_info, user1_id, tok=user1_tok) + self.helper.join(room_id_with_info, user1_id, tok=user1_tok) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + if test_membership == Membership.LEAVE: + # Kick user1 from the rooms + user1_membership_room_id_no_info_response = self.helper.change_membership( + room=room_id_no_info, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + user1_membership_room_id_with_info_response = self.helper.change_membership( + room=room_id_with_info, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + user1_membership_space_room_id_response = self.helper.change_membership( + room=space_room_id, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + elif test_membership == Membership.BAN: + # Ban user1 from the rooms + user1_membership_room_id_no_info_response = self.helper.ban( + room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_membership_room_id_with_info_response = self.helper.ban( + room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_membership_space_room_id_response = self.helper.ban( + space_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + else: + raise AssertionError("Unknown test_membership") + + # Have user2 leave the rooms to make sure that our background update is not just + # reading from `current_state_events`. For leave memberships, we should be + # reading from the historical state. 
+ self.helper.leave(room_id_no_info, user2_id, tok=user2_tok) + self.helper.leave(room_id_with_info, user2_id, tok=user2_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + # Check to make sure we actually don't have any `current_state_events` for the rooms + current_state_check_rows = self.get_success( + self.store.db_pool.simple_select_many_batch( + table="current_state_events", + column="room_id", + iterable=[room_id_no_info, room_id_with_info, space_room_id], + retcols=("event_id",), + keyvalues={}, + desc="check current_state_events in test", + ) + ) + self.assertEqual(len(current_state_check_rows), 0) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_historical_state", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + # The memberships for user1 + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + # The leave memberships for user2 + (room_id_no_info, user2_id), + (room_id_with_info, user2_id), + (space_room_id, user2_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_room_id_no_info_response[ + "event_id" + ], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_room_id_no_info_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_room_id_with_info_response[ + "event_id" + ], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_room_id_with_info_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + 
self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_space_room_id_response["event_id"], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_space_room_id_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_forgotten_missing(self) -> None: + """ + Test that a new row is inserted into `sliding_sync_membership_snapshots` when it + doesn't exist in the table yet. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + # User1 leaves the room (we have to leave in order to forget the room) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + + # Forget the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id,), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
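+ # (Unlike the `sliding_sync_joined_rooms` update above, this one has no
+ # prefill dependency, so a single `background_updates` row is enough to
+ # kick it off.)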
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user left
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ # Room is forgotten
+ forgotten=True,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_membership_snapshots_background_update_forgotten_partial(self) -> None:
+ """
+ Test that an existing `sliding_sync_membership_snapshots` row is updated with the
+ latest `forgotten` status after the background update passes over it.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ # User1 leaves the room (we have to leave in order to forget the room)
+ self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+
+ # Forget the room
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the forgotten status
+ # never made it into the table.
+ self.get_success(
+ self.store.db_pool.simple_update(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id},
+ updatevalues={"forgotten": 0},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_partial",
+ )
+ )
+
+ # We should see the partial row that we made in preparation for the test.
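+ # (The snapshot rows still exist; only user1's `forgotten` flag has been
+ # reset to 0, so the background update has to notice and repair just that
+ # column.)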
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ # Room is *not* forgotten because of our test preparation
+ forgotten=False,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ user1_snapshot,
+ )
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Forgotten status is now updated
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ attr.evolve(user1_snapshot, forgotten=True),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+
+class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Test the background updates for catch-up after a Synapse downgrade to populate the
+ `sliding_sync_joined_rooms` and `sliding_sync_membership_snapshots` tables.
+
+ This is to test the "catch-up" version of the background update vs the "normal"
+ background update to populate the tables with all of the historical data. Both
+ versions share the same background update but just serve different purposes. We
+ check if the "catch-up" version needs to run on start-up based on whether there have
+ been any changes to rooms that aren't reflected in the sliding sync tables.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+
+ def test_joined_background_update_catch_up_new_room(self) -> None:
+ """
+ Test that rooms created while Synapse is downgraded (making
+ `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+ the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate a new room being created while
+ # Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate new room while Synapse was downgraded",
+ )
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # We shouldn't see any new data yet
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_joined_background_update_catch_up_room_state_change(self) -> None:
+ """
+ Test that new events sent while Synapse is downgraded (making
+ `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+ the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Get a snapshot of the `sliding_sync_joined_rooms` table before we add some state
+ sliding_sync_joined_rooms_results_before_state = (
+ self._get_sliding_sync_joined_rooms()
+ )
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results_before_state.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room name
+ # never made it into the table. This is to simulate the room name event
+ # being sent while Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_update(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={
+ # Clear the room name
+ "room_name": None,
+ # Reset the `event_stream_ordering` back to the value before the room name
+ "event_stream_ordering": sliding_sync_joined_rooms_results_before_state[
+ room_id
+ ].event_stream_ordering,
+ },
+ desc="simulate new events while Synapse was downgraded",
+ )
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # Ensure that the stale data is deleted from the table
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_joined_background_update_catch_up_no_rooms(self) -> None:
+ """
+ Test that if you start your homeserver with no rooms on a Synapse version that
+ supports the sliding sync tables and the historical background update completes
+ (because there are no rooms to process), then Synapse is downgraded and new
+ rooms are created/joined; when Synapse is upgraded, the rooms will be processed
+ when the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
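+ # (The scenario being simulated: the historical update already finished with
+ # nothing to do, so only the start-up catch-up check can pick up rooms
+ # created after the downgrade.)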
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate the room being
+ # created while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate the room being created while Synapse
+ # was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_joined_rooms",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="simulate room being created while Synapse was downgraded",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # We still shouldn't find any data yet
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_membership_snapshots_background_update_catch_up_new_membership(
+ self,
+ ) -> None:
+ """
+ Test that a completely new membership while Synapse is downgraded (making
+ `sliding_sync_membership_snapshots` stale) will be caught when Synapse is
+ upgraded and the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Both users are joined to the room
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean up the `sliding_sync_membership_snapshots` table as if the user2
+        # membership never made it into the table. This is to simulate a membership
+        # change while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                desc="simulate new membership while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find the user2 membership in the table because we just deleted it
+        # in preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # We still shouldn't find the user2 membership yet (only user1's row remains)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_membership_change(
+        self,
+    ) -> None:
+        """
+        Test that membership changes while Synapse is downgraded (making
+        `sliding_sync_membership_snapshots` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Both users are joined to the room
+        sliding_sync_membership_snapshots_results_before_membership_changes = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(
+                sliding_sync_membership_snapshots_results_before_membership_changes.keys()
+            ),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+        # User2 leaves the room
+        self.helper.leave(room_id, user2_id, tok=user2_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Roll back the `sliding_sync_membership_snapshots` table as if the user2
+        # membership change never made it into the table. This is to simulate a membership
+        # change while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_update(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                updatevalues={
+                    # Reset everything back to the value before user2 left the room
+                    "membership": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership,
+                    "membership_event_id": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership_event_id,
+                    "event_stream_ordering": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].event_stream_ordering,
+                },
+                desc="simulate membership change while Synapse was downgraded",
+            )
+        )
+
+        # We should see user2 still joined to the room because we made that change in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user1_id)
+            ],
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user2_id)
+            ],
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # Ensure that the stale data is deleted from the table
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_no_membership(
+        self,
+    ) -> None:
+        """
+        Test that if you start your homeserver with no rooms on a Synapse version that
+        supports the sliding sync tables and the historical background update completes
+        (because there are no rooms/memberships to process), then Synapse is downgraded
+        and new rooms are created/joined; when Synapse is upgraded, the rooms will be
+        processed when the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate the room being
+        # created while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean up the `sliding_sync_membership_snapshots` table as if the memberships
+        # never made it into the table. This is to simulate the room being created
+        # while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id,),
+                keyvalues={},
+                desc="simulate room being created while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # We still shouldn't find any data yet
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
diff --git a/tests/unittest.py b/tests/unittest.py
index 4aa7f56106..2532fa49fb 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -272,8 +272,8 @@ class TestCase(unittest.TestCase):
 
     def assertIncludes(
         self,
-        actual_items: AbstractSet[str],
-        expected_items: AbstractSet[str],
+        actual_items: AbstractSet[TV],
+        expected_items: AbstractSet[TV],
         exact: bool = False,
         message: Optional[str] = None,
     ) -> None:

From 2999a14aed298ad4358e280fa9938010f1edd0d7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 29 Aug 2024 16:22:57 +0100
Subject: [PATCH 169/332] Sliding Sync: Make `PerConnectionState` immutable
 (#17600)

This is so that we can cache it. We also move the sliding sync types to
`synapse/types/handlers/sliding_sync.py`. This is mainly in preparation for
#17599 to avoid circular imports.

The only change in behaviour is that
`RoomSyncConfig.combine_room_sync_config(..)` now returns a new room sync
config rather than mutating in-place.

Reviewable commit-by-commit.

---------

Co-authored-by: Eric Eastwood
---
 changelog.d/17600.misc                      |   1 +
 scripts-dev/mypy_synapse_plugin.py          |  19 +-
 synapse/handlers/sliding_sync/__init__.py   |  16 +-
 synapse/handlers/sliding_sync/extensions.py |  14 +-
 synapse/handlers/sliding_sync/room_lists.py |  34 +-
 synapse/handlers/sliding_sync/store.py      |  10 +-
 synapse/types/handlers/__init__.py          | 357 +--------------
 .../handlers/sliding_sync.py}               | 413 ++++++++++++++++--
 tests/handlers/test_sliding_sync.py         |  23 +-
 9 files changed, 442 insertions(+), 445 deletions(-)
 create mode 100644 changelog.d/17600.misc
 rename synapse/{handlers/sliding_sync/types.py => types/handlers/sliding_sync.py} (52%)

diff --git a/changelog.d/17600.misc b/changelog.d/17600.misc
new file mode 100644
index 0000000000..a81c67f6d1
--- /dev/null
+++ b/changelog.d/17600.misc
@@ -0,0 +1 @@
+Make the sliding sync `PerConnectionState` class immutable.
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 877b831751..509047b41b 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -38,6 +38,7 @@ from mypy.types import (
     NoneType,
     TupleType,
     TypeAliasType,
+    TypeVarType,
     UninhabitedType,
     UnionType,
 )
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
     "synapse.synapse_rust.push.FilteredPushRules",
     # This is technically not immutable, but close enough.
     "signedjson.types.VerifyKey",
+    "synapse.types.StrCollection",
 }
 
 # Immutable containers only if the values are also immutable.
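For context, `is_cacheable` is the check this plugin applies to the return types of Synapse's `@cached` methods: a value is only safe to memoize if callers cannot mutate it. A minimal sketch of the kinds of return types the hunks below newly accept or reject; every name here is invented for illustration and is not part of the patch:

from enum import Enum
from typing import TypeVar

# Enum members cannot be reassigned, so the plugin now treats any Enum
# subclass as an immutable (and therefore cacheable) return type.
class ExampleFlag(Enum):
    YES = "yes"
    NO = "no"

# A value-constrained TypeVar is accepted when every allowed value is itself
# immutable, mirroring the `T = TypeVar("T", str, RoomStreamToken,
# MultiWriterStreamToken)` constraint added later in this patch.
CacheableT = TypeVar("CacheableT", str, int)

# An unconstrained TypeVar carries no value restriction, so the plugin
# rejects it with the note "TypeVar is unbound".
UnboundT = TypeVar("UnboundT")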
@@ -298,7 +300,7 @@ def is_cacheable( elif rt.type.fullname in MUTABLE_CONTAINER_TYPES: # Mutable containers are mutable regardless of their underlying type. - return False, None + return False, f"container {rt.type.fullname} is mutable" elif "attrs" in rt.type.metadata: # attrs classes are only cachable iff it is frozen (immutable itself) @@ -318,6 +320,9 @@ def is_cacheable( else: return False, "non-frozen attrs class" + elif rt.type.is_enum: + # We assume Enum values are immutable + return True, None else: # Ensure we fail for unknown types, these generally means that the # above code is not complete. @@ -326,6 +331,18 @@ def is_cacheable( f"Don't know how to handle {rt.type.fullname} return type instance", ) + elif isinstance(rt, TypeVarType): + # We consider TypeVars immutable if they are bound to a set of immutable + # types. + if rt.values: + for value in rt.values: + ok, note = is_cacheable(value, signature, verbose) + if not ok: + return False, f"TypeVar bound not cacheable {value}" + return True, None + + return False, "TypeVar is unbound" + elif isinstance(rt, NoneType): # None is cachable. return True, None diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index c34ba83cd6..7d4f6415c0 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -29,13 +29,6 @@ from synapse.handlers.sliding_sync.room_lists import ( _RoomMembershipForUser, ) from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - MutablePerConnectionState, - PerConnectionState, - RoomSyncConfig, - StateValues, -) from synapse.logging.opentracing import ( SynapseTags, log_kv, @@ -57,10 +50,15 @@ from synapse.types import ( StreamKeyType, StreamToken, ) -from synapse.types.handlers import ( - SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomSyncConfig, SlidingSyncConfig, SlidingSyncResult, + StateValues, ) from synapse.types.state import StateFilter from synapse.util.async_helpers import concurrently_execute diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index a2d4f24f9c..d9f4c56e6e 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -20,11 +20,6 @@ from typing_extensions import assert_never from synapse.api.constants import AccountDataTypes, EduTypes from synapse.handlers.receipts import ReceiptEventSource -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - MutablePerConnectionState, - PerConnectionState, -) from synapse.logging.opentracing import trace from synapse.storage.databases.main.receipts import ReceiptInRoom from synapse.types import ( @@ -35,7 +30,14 @@ from synapse.types import ( StrCollection, StreamToken, ) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + OperationType, + PerConnectionState, + SlidingSyncConfig, + SlidingSyncResult, +) if TYPE_CHECKING: from synapse.server import HomeServer diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 4718e8092b..0e6cb28524 100644 --- 
a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -40,11 +40,6 @@ from synapse.api.constants import ( ) from synapse.events import StrippedStateEvent from synapse.events.utils import parse_stripped_state_event -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - PerConnectionState, - RoomSyncConfig, -) from synapse.logging.opentracing import start_active_span, trace from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, @@ -61,7 +56,14 @@ from synapse.types import ( StreamToken, UserID, ) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + OperationType, + PerConnectionState, + RoomSyncConfig, + SlidingSyncConfig, + SlidingSyncResult, +) from synapse.types.state import StateFilter if TYPE_CHECKING: @@ -279,15 +281,11 @@ class SlidingSyncRoomLists: room_id ) if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( + room_sync_config = existing_room_sync_config.combine_room_sync_config( room_sync_config ) - else: - # Make a copy so if we modify it later, it doesn't - # affect all references. - relevant_room_map[room_id] = ( - room_sync_config.deep_copy() - ) + + relevant_room_map[room_id] = room_sync_config room_ids_in_list.append(room_id) @@ -351,11 +349,13 @@ class SlidingSyncRoomLists: # and need to fetch more info about. existing_room_sync_config = relevant_room_map.get(room_id) if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( - room_sync_config + room_sync_config = ( + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) ) - else: - relevant_room_map[room_id] = room_sync_config + + relevant_room_map[room_id] = room_sync_config # Filtered subset of `relevant_room_map` for rooms that may have updates # (in the event stream) diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index 3b727432fb..e38fe3556f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -18,13 +18,13 @@ from typing import TYPE_CHECKING, Dict, Optional, Tuple import attr from synapse.api.errors import SlidingSyncUnknownPosition -from synapse.handlers.sliding_sync.types import ( - MutablePerConnectionState, - PerConnectionState, -) from synapse.logging.opentracing import trace from synapse.types import SlidingSyncStreamToken -from synapse.types.handlers import SlidingSyncConfig +from synapse.types.handlers.sliding_sync import ( + MutablePerConnectionState, + PerConnectionState, + SlidingSyncConfig, +) if TYPE_CHECKING: pass diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 126f03dd90..f2fbc1dddf 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -17,34 +17,11 @@ # [This file includes modifications made by New Vector Limited] # # -from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple -import attr -from typing_extensions import TypedDict -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra -else: - from pydantic import Extra +from typing import List, Optional, TypedDict from synapse.api.constants import EventTypes -from synapse.events import EventBase -from synapse.types import ( - DeviceListUpdates, - JsonDict, - JsonMapping, - 
Requester, - SlidingSyncStreamToken, - StreamToken, - UserID, -) -from synapse.types.rest.client import SlidingSyncBody - -if TYPE_CHECKING: - from synapse.handlers.relations import BundledAggregations # Sliding Sync: The event types that clients should consider as new activity and affect # the `bump_stamp` @@ -114,335 +91,3 @@ class ShutdownRoomResponse(TypedDict): failed_to_kick_users: List[str] local_aliases: List[str] new_room_id: Optional[str] - - -class SlidingSyncConfig(SlidingSyncBody): - """ - Inherit from `SlidingSyncBody` since we need all of the same fields and add a few - extra fields that we need in the handler - """ - - user: UserID - requester: Requester - - # Pydantic config - class Config: - # By default, ignore fields that we don't recognise. - extra = Extra.ignore - # By default, don't allow fields to be reassigned after parsing. - allow_mutation = False - # Allow custom types like `UserID` to be used in the model - arbitrary_types_allowed = True - - -class OperationType(Enum): - """ - Represents the operation types in a Sliding Sync window. - - Attributes: - SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about - entries in this range. - INSERT: Sets a single entry. If the position is not empty then clients MUST move - entries to the left or the right depending on where the closest empty space is. - DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move - places. - INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for - offline support, but they should be treated as empty when additional operations - which concern indexes in the range arrive from the server. - """ - - SYNC: Final = "SYNC" - INSERT: Final = "INSERT" - DELETE: Final = "DELETE" - INVALIDATE: Final = "INVALIDATE" - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class SlidingSyncResult: - """ - The Sliding Sync result to be serialized to JSON for a response. - - Attributes: - next_pos: The next position token in the sliding window to request (next_batch). - lists: Sliding window API. A map of list key to list results. - rooms: Room subscription API. A map of room ID to room results. - extensions: Extensions API. A map of extension key to extension results. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class RoomResult: - """ - Attributes: - name: Room name or calculated room name. - avatar: Room avatar - heroes: List of stripped membership events (containing `user_id` and optionally - `avatar_url` and `displayname`) for the users used to calculate the room name. - is_dm: Flag to specify whether the room is a direct-message room (most likely - between two people). - initial: Flag which is set when this is the first time the server is sending this - data on this connection. Clients can use this flag to replace or update - their local state. When there is an update, servers MUST omit this flag - entirely and NOT send "initial":false as this is wasteful on bandwidth. The - absence of this flag means 'false'. - unstable_expanded_timeline: Flag which is set if we're returning more historic - events due to the timeline limit having increased. See "XXX: Odd behavior" - comment ing `synapse.handlers.sliding_sync`. - required_state: The current state of the room - timeline: Latest events in the room. The last event is the most recent. - bundled_aggregations: A mapping of event ID to the bundled aggregations for - the timeline events above. 
This allows clients to show accurate reaction - counts (or edits, threads), even if some of the reaction events were skipped - over in a gappy sync. - stripped_state: Stripped state events (for rooms where the usre is - invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2, - absent on joined/left rooms - prev_batch: A token that can be passed as a start parameter to the - `/rooms//messages` API to retrieve earlier messages. - limited: True if there are more events than `timeline_limit` looking - backwards from the `response.pos` to the `request.pos`. - num_live: The number of timeline events which have just occurred and are not historical. - The last N events are 'live' and should be treated as such. This is mostly - useful to determine whether a given @mention event should make a noise or not. - Clients cannot rely solely on the absence of `initial: true` to determine live - events because if a room not in the sliding window bumps into the window because - of an @mention it will have `initial: true` yet contain a single live event - (with potentially other old events in the timeline). - bump_stamp: The `stream_ordering` of the last event according to the - `bump_event_types`. This helps clients sort more readily without them - needing to pull in a bunch of the timeline to determine the last activity. - `bump_event_types` is a thing because for example, we don't want display - name changes to mark the room as unread and bump it to the top. For - encrypted rooms, we just have to consider any activity as a bump because we - can't see the content and the client has to figure it out for themselves. - joined_count: The number of users with membership of join, including the client's - own user ID. (same as sync `v2 m.joined_member_count`) - invited_count: The number of users with membership of invite. (same as sync v2 - `m.invited_member_count`) - notification_count: The total number of unread notifications for this room. (same - as sync v2) - highlight_count: The number of unread notifications for this room with the highlight - flag set. (same as sync v2) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class StrippedHero: - user_id: str - display_name: Optional[str] - avatar_url: Optional[str] - - name: Optional[str] - avatar: Optional[str] - heroes: Optional[List[StrippedHero]] - is_dm: bool - initial: bool - unstable_expanded_timeline: bool - # Should be empty for invite/knock rooms with `stripped_state` - required_state: List[EventBase] - # Should be empty for invite/knock rooms with `stripped_state` - timeline_events: List[EventBase] - bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] - # Optional because it's only relevant to invite/knock rooms - stripped_state: List[JsonDict] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - prev_batch: Optional[StreamToken] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - limited: Optional[bool] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - num_live: Optional[int] - bump_stamp: int - joined_count: int - invited_count: int - notification_count: int - highlight_count: int - - def __bool__(self) -> bool: - return ( - # If this is the first time the client is seeing the room, we should not filter it out - # under any circumstance. 
- self.initial - # We need to let the client know if there are any new events - or bool(self.required_state) - or bool(self.timeline_events) - or bool(self.stripped_state) - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class SlidingWindowList: - """ - Attributes: - count: The total number of entries in the list. Always present if this list - is. - ops: The sliding list operations to perform. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Operation: - """ - Attributes: - op: The operation type to perform. - range: Which index positions are affected by this operation. These are - both inclusive. - room_ids: Which room IDs are affected by this operation. These IDs match - up to the positions in the `range`, so the last room ID in this list - matches the 9th index. The room data is held in a separate object. - """ - - op: OperationType - range: Tuple[int, int] - room_ids: List[str] - - count: int - ops: List[Operation] - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Extensions: - """Responses for extensions - - Attributes: - to_device: The to-device extension (MSC3885) - e2ee: The E2EE device extension (MSC3884) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ToDeviceExtension: - """The to-device extension (MSC3885) - - Attributes: - next_batch: The to-device stream token the client should use - to get more results - events: A list of to-device messages for the client - """ - - next_batch: str - events: Sequence[JsonMapping] - - def __bool__(self) -> bool: - return bool(self.events) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class E2eeExtension: - """The E2EE device extension (MSC3884) - - Attributes: - device_list_updates: List of user_ids whose devices have changed or left (only - present on incremental syncs). - device_one_time_keys_count: Map from key algorithm to the number of - unclaimed one-time keys currently held on the server for this device. If - an algorithm is unlisted, the count for that algorithm is assumed to be - zero. If this entire parameter is missing, the count for all algorithms - is assumed to be zero. - device_unused_fallback_key_types: List of unused fallback key algorithms - for this device. - """ - - # Only present on incremental syncs - device_list_updates: Optional[DeviceListUpdates] - device_one_time_keys_count: Mapping[str, int] - device_unused_fallback_key_types: Sequence[str] - - def __bool__(self) -> bool: - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. - # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - default_otk = self.device_one_time_keys_count.get("signed_curve25519") - more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( - default_otk is not None and default_otk > 0 - ) - - return bool( - more_than_default_otk - or self.device_list_updates - or self.device_unused_fallback_key_types - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class AccountDataExtension: - """The Account Data extension (MSC3959) - - Attributes: - global_account_data_map: Mapping from `type` to `content` of global account - data events. - account_data_by_room_map: Mapping from room_id to mapping of `type` to - `content` of room account data events. 
- """ - - global_account_data_map: Mapping[str, JsonMapping] - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] - - def __bool__(self) -> bool: - return bool( - self.global_account_data_map or self.account_data_by_room_map - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ReceiptsExtension: - """The Receipts extension (MSC3960) - - Attributes: - room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral - event (type, content) - """ - - room_id_to_receipt_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_receipt_map) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class TypingExtension: - """The Typing Notification extension (MSC3961) - - Attributes: - room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral - event (type, content) - """ - - room_id_to_typing_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_typing_map) - - to_device: Optional[ToDeviceExtension] = None - e2ee: Optional[E2eeExtension] = None - account_data: Optional[AccountDataExtension] = None - receipts: Optional[ReceiptsExtension] = None - typing: Optional[TypingExtension] = None - - def __bool__(self) -> bool: - return bool( - self.to_device - or self.e2ee - or self.account_data - or self.receipts - or self.typing - ) - - next_pos: SlidingSyncStreamToken - lists: Mapping[str, SlidingWindowList] - rooms: Dict[str, RoomResult] - extensions: Extensions - - def __bool__(self) -> bool: - """Make the result appear empty if there are no updates. This is used - to tell if the notifier needs to wait for more events when polling for - events. - """ - # We don't include `self.lists` here, as a) `lists` is always non-empty even if - # there are no changes, and b) since we're sorting rooms by `stream_ordering` of - # the latest activity, anything that would cause the order to change would end - # up in `self.rooms` and cause us to send down the change. 
-        return bool(self.rooms or self.extensions)
-
-    @staticmethod
-    def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
-        "Return a new empty result"
-        return SlidingSyncResult(
-            next_pos=next_pos,
-            lists={},
-            rooms={},
-            extensions=SlidingSyncResult.Extensions(),
-        )
diff --git a/synapse/handlers/sliding_sync/types.py b/synapse/types/handlers/sliding_sync.py
similarity index 52%
rename from synapse/handlers/sliding_sync/types.py
rename to synapse/types/handlers/sliding_sync.py
index 003419d40a..bca1ff7b54 100644
--- a/synapse/handlers/sliding_sync/types.py
+++ b/synapse/types/handlers/sliding_sync.py
@@ -18,30 +18,382 @@ from collections import ChainMap
 from enum import Enum
 from typing import (
     TYPE_CHECKING,
+    AbstractSet,
     Callable,
     Dict,
     Final,
     Generic,
+    List,
     Mapping,
     MutableMapping,
     Optional,
+    Sequence,
     Set,
+    Tuple,
     TypeVar,
     cast,
 )
 
 import attr
 
+from synapse._pydantic_compat import HAS_PYDANTIC_V2
 from synapse.api.constants import EventTypes
 from synapse.types import MultiWriterStreamToken, RoomStreamToken, StrCollection, UserID
-from synapse.types.handlers import SlidingSyncConfig
+
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+    from pydantic.v1 import Extra
+else:
+    from pydantic import Extra
+
+from synapse.events import EventBase
+from synapse.types import (
+    DeviceListUpdates,
+    JsonDict,
+    JsonMapping,
+    Requester,
+    SlidingSyncStreamToken,
+    StreamToken,
+)
+from synapse.types.rest.client import SlidingSyncBody
 
 if TYPE_CHECKING:
-    pass
+    from synapse.handlers.relations import BundledAggregations
 
 
 logger = logging.getLogger(__name__)
 
 
+class SlidingSyncConfig(SlidingSyncBody):
+    """
+    Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
+    extra fields that we need in the handler
+    """
+
+    user: UserID
+    requester: Requester
+
+    # Pydantic config
+    class Config:
+        # By default, ignore fields that we don't recognise.
+        extra = Extra.ignore
+        # By default, don't allow fields to be reassigned after parsing.
+        allow_mutation = False
+        # Allow custom types like `UserID` to be used in the model
+        arbitrary_types_allowed = True
+
+
+class OperationType(Enum):
+    """
+    Represents the operation types in a Sliding Sync window.
+
+    Attributes:
+        SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
+            entries in this range.
+        INSERT: Sets a single entry. If the position is not empty then clients MUST move
+            entries to the left or the right depending on where the closest empty space is.
+        DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
+            places.
+        INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
+            offline support, but they should be treated as empty when additional operations
+            which concern indexes in the range arrive from the server.
+    """
+
+    SYNC: Final = "SYNC"
+    INSERT: Final = "INSERT"
+    DELETE: Final = "DELETE"
+    INVALIDATE: Final = "INVALIDATE"
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SlidingSyncResult:
+    """
+    The Sliding Sync result to be serialized to JSON for a response.
+
+    Attributes:
+        next_pos: The next position token in the sliding window to request (next_batch).
+        lists: Sliding window API. A map of list key to list results.
+        rooms: Room subscription API. A map of room ID to room results.
+        extensions: Extensions API. A map of extension key to extension results.
+    """
+
+    @attr.s(slots=True, frozen=True, auto_attribs=True)
+    class RoomResult:
+        """
+        Attributes:
+            name: Room name or calculated room name.
+            avatar: Room avatar
+            heroes: List of stripped membership events (containing `user_id` and optionally
+                `avatar_url` and `displayname`) for the users used to calculate the room name.
+            is_dm: Flag to specify whether the room is a direct-message room (most likely
+                between two people).
+            initial: Flag which is set when this is the first time the server is sending this
+                data on this connection. Clients can use this flag to replace or update
+                their local state. When there is an update, servers MUST omit this flag
+                entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+                absence of this flag means 'false'.
+            unstable_expanded_timeline: Flag which is set if we're returning more historic
+                events due to the timeline limit having increased. See "XXX: Odd behavior"
+                comment in `synapse.handlers.sliding_sync`.
+            required_state: The current state of the room
+            timeline: Latest events in the room. The last event is the most recent.
+            bundled_aggregations: A mapping of event ID to the bundled aggregations for
+                the timeline events above. This allows clients to show accurate reaction
+                counts (or edits, threads), even if some of the reaction events were skipped
+                over in a gappy sync.
+            stripped_state: Stripped state events (for rooms where the user is
+                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
+                absent on joined/left rooms.
+            prev_batch: A token that can be passed as a start parameter to the
+                `/rooms/<room_id>/messages` API to retrieve earlier messages.
+            limited: True if there are more events than `timeline_limit` looking
+                backwards from the `response.pos` to the `request.pos`.
+            num_live: The number of timeline events which have just occurred and are not historical.
+                The last N events are 'live' and should be treated as such. This is mostly
+                useful to determine whether a given @mention event should make a noise or not.
+                Clients cannot rely solely on the absence of `initial: true` to determine live
+                events because if a room not in the sliding window bumps into the window because
+                of an @mention it will have `initial: true` yet contain a single live event
+                (with potentially other old events in the timeline).
+            bump_stamp: The `stream_ordering` of the last event according to the
+                `bump_event_types`. This helps clients sort more readily without them
+                needing to pull in a bunch of the timeline to determine the last activity.
+                `bump_event_types` is a thing because for example, we don't want display
+                name changes to mark the room as unread and bump it to the top. For
+                encrypted rooms, we just have to consider any activity as a bump because we
+                can't see the content and the client has to figure it out for themselves.
+            joined_count: The number of users with membership of join, including the client's
+                own user ID. (same as sync v2 `m.joined_member_count`)
+            invited_count: The number of users with membership of invite. (same as sync v2
+                `m.invited_member_count`)
+            notification_count: The total number of unread notifications for this room. (same
+                as sync v2)
+            highlight_count: The number of unread notifications for this room with the highlight
+                flag set.
(same as sync v2) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class StrippedHero: + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] + + name: Optional[str] + avatar: Optional[str] + heroes: Optional[List[StrippedHero]] + is_dm: bool + initial: bool + unstable_expanded_timeline: bool + # Should be empty for invite/knock rooms with `stripped_state` + required_state: List[EventBase] + # Should be empty for invite/knock rooms with `stripped_state` + timeline_events: List[EventBase] + bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] + # Optional because it's only relevant to invite/knock rooms + stripped_state: List[JsonDict] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + prev_batch: Optional[StreamToken] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + limited: Optional[bool] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + num_live: Optional[int] + bump_stamp: int + joined_count: int + invited_count: int + notification_count: int + highlight_count: int + + def __bool__(self) -> bool: + return ( + # If this is the first time the client is seeing the room, we should not filter it out + # under any circumstance. + self.initial + # We need to let the client know if there are any new events + or bool(self.required_state) + or bool(self.timeline_events) + or bool(self.stripped_state) + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class SlidingWindowList: + """ + Attributes: + count: The total number of entries in the list. Always present if this list + is. + ops: The sliding list operations to perform. + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Operation: + """ + Attributes: + op: The operation type to perform. + range: Which index positions are affected by this operation. These are + both inclusive. + room_ids: Which room IDs are affected by this operation. These IDs match + up to the positions in the `range`, so the last room ID in this list + matches the 9th index. The room data is held in a separate object. + """ + + op: OperationType + range: Tuple[int, int] + room_ids: List[str] + + count: int + ops: List[Operation] + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Extensions: + """Responses for extensions + + Attributes: + to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ToDeviceExtension: + """The to-device extension (MSC3885) + + Attributes: + next_batch: The to-device stream token the client should use + to get more results + events: A list of to-device messages for the client + """ + + next_batch: str + events: Sequence[JsonMapping] + + def __bool__(self) -> bool: + return bool(self.events) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. 
+ device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. + """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class AccountDataExtension: + """The Account Data extension (MSC3959) + + Attributes: + global_account_data_map: Mapping from `type` to `content` of global account + data events. + account_data_by_room_map: Mapping from room_id to mapping of `type` to + `content` of room account data events. + """ + + global_account_data_map: Mapping[str, JsonMapping] + account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] + + def __bool__(self) -> bool: + return bool( + self.global_account_data_map or self.account_data_by_room_map + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ReceiptsExtension: + """The Receipts extension (MSC3960) + + Attributes: + room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral + event (type, content) + """ + + room_id_to_receipt_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_receipt_map) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TypingExtension: + """The Typing Notification extension (MSC3961) + + Attributes: + room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral + event (type, content) + """ + + room_id_to_typing_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_typing_map) + + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None + account_data: Optional[AccountDataExtension] = None + receipts: Optional[ReceiptsExtension] = None + typing: Optional[TypingExtension] = None + + def __bool__(self) -> bool: + return bool( + self.to_device + or self.e2ee + or self.account_data + or self.receipts + or self.typing + ) + + next_pos: SlidingSyncStreamToken + lists: Mapping[str, SlidingWindowList] + rooms: Dict[str, RoomResult] + extensions: Extensions + + def __bool__(self) -> bool: + """Make the result appear empty if there are no updates. This is used + to tell if the notifier needs to wait for more events when polling for + events. + """ + # We don't include `self.lists` here, as a) `lists` is always non-empty even if + # there are no changes, and b) since we're sorting rooms by `stream_ordering` of + # the latest activity, anything that would cause the order to change would end + # up in `self.rooms` and cause us to send down the change. 
+ return bool(self.rooms or self.extensions) + + @staticmethod + def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": + "Return a new empty result" + return SlidingSyncResult( + next_pos=next_pos, + lists={}, + rooms={}, + extensions=SlidingSyncResult.Extensions(), + ) + + class StateValues: """ Understood values of the (type, state_key) tuple in `required_state`. @@ -60,7 +412,7 @@ class StateValues: # We can't freeze this class because we want to update it in place with the # de-duplicated data. -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, auto_attribs=True, frozen=True) class RoomSyncConfig: """ Holds the config for what data we should fetch for a room in the sync response. @@ -74,7 +426,7 @@ class RoomSyncConfig: """ timeline_limit: int - required_state_map: Dict[str, Set[str]] + required_state_map: Mapping[str, AbstractSet[str]] @classmethod def from_room_config( @@ -146,27 +498,22 @@ class RoomSyncConfig: required_state_map=required_state_map, ) - def deep_copy(self) -> "RoomSyncConfig": - required_state_map: Dict[str, Set[str]] = { - state_type: state_key_set.copy() - for state_type, state_key_set in self.required_state_map.items() - } - - return RoomSyncConfig( - timeline_limit=self.timeline_limit, - required_state_map=required_state_map, - ) - def combine_room_sync_config( self, other_room_sync_config: "RoomSyncConfig" - ) -> None: + ) -> "RoomSyncConfig": """ - Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the + Combine this `RoomSyncConfig` with another `RoomSyncConfig` and return the superset union of the two. """ + timeline_limit = self.timeline_limit + required_state_map = { + event_type: set(state_keys) + for event_type, state_keys in self.required_state_map.items() + } + # Take the highest timeline limit - if self.timeline_limit < other_room_sync_config.timeline_limit: - self.timeline_limit = other_room_sync_config.timeline_limit + if timeline_limit < other_room_sync_config.timeline_limit: + timeline_limit = other_room_sync_config.timeline_limit # Union the required state for ( @@ -175,14 +522,14 @@ class RoomSyncConfig: ) in other_room_sync_config.required_state_map.items(): # If we already have a wildcard for everything, we don't need to add # anything else - if StateValues.WILDCARD in self.required_state_map.get( + if StateValues.WILDCARD in required_state_map.get( StateValues.WILDCARD, set() ): break # If we already have a wildcard `state_key` for this `state_type`, we don't need # to add anything else - if StateValues.WILDCARD in self.required_state_map.get(state_type, set()): + if StateValues.WILDCARD in required_state_map.get(state_type, set()): continue # If we're getting wildcards for the `state_type` and `state_key`, that's @@ -191,16 +538,14 @@ class RoomSyncConfig: state_type == StateValues.WILDCARD and StateValues.WILDCARD in state_key_set ): - self.required_state_map = {state_type: {StateValues.WILDCARD}} + required_state_map = {state_type: {StateValues.WILDCARD}} # We can break, since we don't need to add anything else break for state_key in state_key_set: # If we already have a wildcard for this specific `state_key`, we don't need # to add it since the wildcard already covers it. 
-                    if state_key in self.required_state_map.get(
-                        StateValues.WILDCARD, set()
-                    ):
+                    if state_key in required_state_map.get(StateValues.WILDCARD, set()):
                         continue
 
                 # If we're getting a wildcard for the `state_type`, get rid of any other
@@ -211,7 +556,7 @@
                     # Make a copy so we don't run into an error: `dictionary changed size
                     # during iteration`, when we remove items
                     for existing_state_type, existing_state_key_set in list(
-                        self.required_state_map.items()
+                        required_state_map.items()
                     ):
                         # Make a copy so we don't run into an error: `Set changed size during
                         # iteration`, when we filter out and remove items
@@ -221,19 +566,21 @@
 
                         # If we've left the `set()` empty, remove it from the map
                         if existing_state_key_set == set():
-                            self.required_state_map.pop(existing_state_type, None)
+                            required_state_map.pop(existing_state_type, None)
 
                 # If we're getting a wildcard `state_key`, get rid of any other state_keys
                 # for this `state_type` since the wildcard will cover it already.
                 if state_key == StateValues.WILDCARD:
-                    self.required_state_map[state_type] = {state_key}
+                    required_state_map[state_type] = {state_key}
                     break
                 # Otherwise, just add it to the set
                 else:
-                    if self.required_state_map.get(state_type) is None:
-                        self.required_state_map[state_type] = {state_key}
+                    if required_state_map.get(state_type) is None:
+                        required_state_map[state_type] = {state_key}
                     else:
-                        self.required_state_map[state_type].add(state_key)
+                        required_state_map[state_type].add(state_key)
+
+        return RoomSyncConfig(timeline_limit, required_state_map)
 
     def must_await_full_state(
         self,
@@ -324,7 +671,7 @@ class HaveSentRoomFlag(Enum):
     LIVE = "live"
 
 
-T = TypeVar("T")
+T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken)
 
 
 @attr.s(auto_attribs=True, slots=True, frozen=True)
@@ -439,7 +786,7 @@ class MutableRoomStatusMap(RoomStatusMap[T]):
 
         self._statuses[room_id] = HaveSentRoom.previously(from_token)
 
 
-@attr.s(auto_attribs=True)
+@attr.s(auto_attribs=True, frozen=True)
 class PerConnectionState:
     """The per-connection state. A snapshot of what we've sent down the
     connection before.
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 2cf2f2982f..2ef9f665f9 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -18,7 +18,6 @@
 #
 #
 import logging
-from copy import deepcopy
 from typing import Dict, List, Optional
 from unittest.mock import patch
 
@@ -47,7 +46,7 @@ from synapse.rest.client import knock, login, room
 from synapse.server import HomeServer
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.types import JsonDict, StreamToken, UserID
-from synapse.types.handlers import SlidingSyncConfig
+from synapse.types.handlers.sliding_sync import SlidingSyncConfig
 from synapse.util import Clock
 
 from tests.replication._base import BaseMultiWorkerStreamTestCase
@@ -566,23 +565,11 @@ class RoomSyncConfigTestCase(TestCase):
         """
         Combine A into B and B into A to make sure we get the same result.
""" - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) + combined_config = a.combine_room_sync_config(b) + self._assert_room_config_equal(combined_config, expected, "B into A") - # Combine B into A - room_sync_config_a.combine_room_sync_config(room_sync_config_b) - - self._assert_room_config_equal(room_sync_config_a, expected, "B into A") - - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) - - # Combine A into B - room_sync_config_b.combine_room_sync_config(room_sync_config_a) - - self._assert_room_config_equal(room_sync_config_b, expected, "A into B") + combined_config = a.combine_room_sync_config(b) + self._assert_room_config_equal(combined_config, expected, "A into B") class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): From e43c2b023e15f065be83c37d1766d76b4c18ad30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 16:26:58 +0100 Subject: [PATCH 170/332] Sliding sync: Store the per-connection state in the database. (#17599) Based on #17600 --------- Co-authored-by: Eric Eastwood --- changelog.d/17599.misc | 1 + synapse/app/generic_worker.py | 2 + synapse/handlers/sliding_sync/__init__.py | 9 +- synapse/handlers/sliding_sync/store.py | 140 ++--- synapse/storage/database.py | 43 ++ synapse/storage/databases/main/__init__.py | 2 + .../storage/databases/main/sliding_sync.py | 491 ++++++++++++++++++ synapse/storage/engines/_base.py | 5 + synapse/storage/engines/postgres.py | 7 + synapse/storage/engines/sqlite.py | 6 + synapse/storage/schema/__init__.py | 3 + .../main/delta/87/02_per_connection_state.sql | 81 +++ synapse/types/handlers/sliding_sync.py | 6 + .../sliding_sync/test_rooms_required_state.py | 10 +- 14 files changed, 691 insertions(+), 115 deletions(-) create mode 100644 changelog.d/17599.misc create mode 100644 synapse/storage/databases/main/sliding_sync.py create mode 100644 synapse/storage/schema/main/delta/87/02_per_connection_state.sql diff --git a/changelog.d/17599.misc b/changelog.d/17599.misc new file mode 100644 index 0000000000..2f81356d12 --- /dev/null +++ b/changelog.d/17599.misc @@ -0,0 +1 @@ +Store sliding sync per-connection state in the database. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 53f1859256..18d294f2b2 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -98,6 +98,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore from synapse.storage.databases.main.signatures import SignatureWorkerStore +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.stream import StreamWorkerStore @@ -159,6 +160,7 @@ class GenericWorkerStore( SessionStore, TaskSchedulerWorkerStore, ExperimentalFeaturesStore, + SlidingSyncStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. 
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 7d4f6415c0..d92bdad307 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -89,7 +89,7 @@ class SlidingSyncHandler: self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.is_mine_id = hs.is_mine_id - self.connection_store = SlidingSyncConnectionStore() + self.connection_store = SlidingSyncConnectionStore(self.store) self.extensions = SlidingSyncExtensionHandler(hs) self.room_lists = SlidingSyncRoomLists(hs) @@ -210,16 +210,11 @@ class SlidingSyncHandler: # amount of time (more with round-trips and re-processing) in the end to # get everything again. previous_connection_state = ( - await self.connection_store.get_per_connection_state( + await self.connection_store.get_and_clear_connection_positions( sync_config, from_token ) ) - await self.connection_store.mark_token_seen( - sync_config=sync_config, - from_token=from_token, - ) - # Get all of the room IDs that the user should be able to see in the sync # response has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index e38fe3556f..d24fccf76f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -13,12 +13,12 @@ # import logging -from typing import TYPE_CHECKING, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr -from synapse.api.errors import SlidingSyncUnknownPosition from synapse.logging.opentracing import trace +from synapse.storage.databases.main import DataStore from synapse.types import SlidingSyncStreamToken from synapse.types.handlers.sliding_sync import ( MutablePerConnectionState, @@ -61,22 +61,9 @@ class SlidingSyncConnectionStore: to mapping of room ID to `HaveSentRoom`. 
""" - # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState` - _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory( - dict - ) + store: "DataStore" - async def is_valid_token( - self, sync_config: SlidingSyncConfig, connection_token: int - ) -> bool: - """Return whether the connection token is valid/recognized""" - if connection_token == 0: - return True - - conn_key = self._get_connection_key(sync_config) - return connection_token in self._connections.get(conn_key, {}) - - async def get_per_connection_state( + async def get_and_clear_connection_positions( self, sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], @@ -86,23 +73,21 @@ class SlidingSyncConnectionStore: Raises: SlidingSyncUnknownPosition if the connection_token is unknown """ - if from_token is None: + # If this is our first request, there is no previous connection state to fetch out of the database + if from_token is None or from_token.connection_position == 0: return PerConnectionState() - connection_position = from_token.connection_position - if connection_position == 0: - # Initial sync (request without a `from_token`) starts at `0` so - # there is no existing per-connection state - return PerConnectionState() + conn_id = sync_config.conn_id or "" - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.get(conn_key, {}) - connection_state = sync_statuses.get(connection_position) + device_id = sync_config.requester.device_id + assert device_id is not None - if connection_state is None: - raise SlidingSyncUnknownPosition() - - return connection_state + return await self.store.get_and_clear_connection_positions( + sync_config.user.to_string(), + device_id, + conn_id, + from_token.connection_position, + ) @trace async def record_new_state( @@ -116,85 +101,28 @@ class SlidingSyncConnectionStore: If there are no changes to the state this may return the same token as the existing per-connection state. """ - prev_connection_token = 0 - if from_token is not None: - prev_connection_token = from_token.connection_position - if not new_connection_state.has_updates(): - return prev_connection_token + if from_token is not None: + return from_token.connection_position + else: + return 0 - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) + # A from token with a zero connection position means there was no + # previously stored connection state, so we treat a zero the same as + # there being no previous position. + previous_connection_position = None + if from_token is not None and from_token.connection_position != 0: + previous_connection_position = from_token.connection_position - # Generate a new token, removing any existing entries in that token - # (which can happen if requests get resent). - new_store_token = prev_connection_token + 1 - sync_statuses.pop(new_store_token, None) - - # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s - # don't grow forever. - sync_statuses[new_store_token] = new_connection_state.copy() - - return new_store_token - - @trace - async def mark_token_seen( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - ) -> None: - """We have received a request with the given token, so we can clear out - any other tokens associated with the connection. - - If there is no from token then we have started afresh, and so we delete - all tokens associated with the device. 
- """ - # Clear out any tokens for the connection that doesn't match the one - # from the request. - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.pop(conn_key, {}) - if from_token is None: - return - - sync_statuses = { - connection_token: room_statuses - for connection_token, room_statuses in sync_statuses.items() - if connection_token == from_token.connection_position - } - if sync_statuses: - self._connections[conn_key] = sync_statuses - - @staticmethod - def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: - """Return a unique identifier for this connection. - - The first part is simply the user ID. - - The second part is generally a combination of device ID and conn_id. - However, both these two are optional (e.g. puppet access tokens don't - have device IDs), so this handles those edge cases. - - We use this over the raw `conn_id` to avoid clashes between different - clients that use the same `conn_id`. Imagine a user uses a web client - that uses `conn_id: main_sync_loop` and an Android client that also has - a `conn_id: main_sync_loop`. - """ - - user_id = sync_config.user.to_string() - - # Only one sliding sync connection is allowed per given conn_id (empty - # or not). conn_id = sync_config.conn_id or "" - if sync_config.requester.device_id: - return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}") + device_id = sync_config.requester.device_id + assert device_id is not None - if sync_config.requester.access_token_id: - # If we don't have a device, then the access token ID should be a - # stable ID. - return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}") - - # If we have neither then its likely an AS or some weird token. Either - # way we can just fail here. - raise Exception("Cannot use sliding sync with access token type") + return await self.store.persist_per_connection_state( + sync_config.user.to_string(), + device_id, + conn_id, + previous_connection_position, + new_connection_state, + ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index d666039120..cb4a5857be 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -65,6 +65,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor, SQLQueryParameters +from synapse.types import StrCollection from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter @@ -1096,6 +1097,48 @@ class DatabasePool: txn.execute(sql, vals) + @staticmethod + def simple_insert_returning_txn( + txn: LoggingTransaction, + table: str, + values: Dict[str, Any], + returning: StrCollection, + ) -> Tuple[Any, ...]: + """Executes a `INSERT INTO... RETURNING...` statement (or equivalent for + SQLite versions that don't support it). + """ + + if txn.database_engine.supports_returning: + sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % ( + table, + ", ".join(k for k in values.keys()), + ", ".join("?" 
for _ in values.keys()), + ", ".join(k for k in returning), + ) + + txn.execute(sql, list(values.values())) + row = txn.fetchone() + assert row is not None + return row + else: + # For old versions of SQLite we do a standard insert and then can + # use `last_insert_rowid` to get at the row we just inserted + DatabasePool.simple_insert_txn( + txn, + table=table, + values=values, + ) + txn.execute("SELECT last_insert_rowid()") + row = txn.fetchone() + assert row is not None + (rowid,) = row + + row = DatabasePool.simple_select_one_txn( + txn, table=table, keyvalues={"rowid": rowid}, retcols=returning + ) + assert row is not None + return row + async def simple_insert_many( self, table: str, diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 586e84f2a4..9a43ab63e8 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -33,6 +33,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor @@ -156,6 +157,7 @@ class DataStore( LockStore, SessionStore, TaskSchedulerWorkerStore, + SlidingSyncStore, ): def __init__( self, diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py new file mode 100644 index 0000000000..dc747d7ac0 --- /dev/null +++ b/synapse/storage/databases/main/sliding_sync.py @@ -0,0 +1,491 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + + +import logging +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast + +import attr + +from synapse.api.errors import SlidingSyncUnknownPosition +from synapse.logging.opentracing import log_kv +from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import LoggingTransaction +from synapse.types import MultiWriterStreamToken, RoomStreamToken +from synapse.types.handlers.sliding_sync import ( + HaveSentRoom, + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomStatusMap, + RoomSyncConfig, +) +from synapse.util import json_encoder +from synapse.util.caches.descriptors import cached + +if TYPE_CHECKING: + from synapse.storage.databases.main import DataStore + +logger = logging.getLogger(__name__) + + +class SlidingSyncStore(SQLBaseStore): + async def persist_per_connection_state( + self, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "MutablePerConnectionState", + ) -> int: + """Persist updates to the per-connection state for a sliding sync + connection. + + Returns: + The connection position of the newly persisted state. + """ + + # This cast is safe because the downstream code only cares about + # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed + # alongside `SlidingSyncStore` wherever we create a store. 
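+        # (At runtime `self` already is that `DataStore`; `cast` is a no-op and
+        # only exists to give mypy the wider type.)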
+ store = cast("DataStore", self) + + return await self.db_pool.runInteraction( + "persist_per_connection_state", + self.persist_per_connection_state_txn, + user_id=user_id, + device_id=device_id, + conn_id=conn_id, + previous_connection_position=previous_connection_position, + per_connection_state=await PerConnectionStateDB.from_state( + per_connection_state, store + ), + ) + + def persist_per_connection_state_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "PerConnectionStateDB", + ) -> int: + # First we fetch (or create) the connection key associated with the + # previous connection position. + if previous_connection_position is not None: + # The `previous_connection_position` is a user-supplied value, so we + # need to make sure that the one they supplied is actually theirs. + sql = """ + SELECT connection_key + FROM sliding_sync_connection_positions + INNER JOIN sliding_sync_connections USING (connection_key) + WHERE + connection_position = ? + AND user_id = ? AND effective_device_id = ? AND conn_id = ? + """ + txn.execute( + sql, (previous_connection_position, user_id, device_id, conn_id) + ) + row = txn.fetchone() + if row is None: + raise SlidingSyncUnknownPosition() + + (connection_key,) = row + else: + # We're restarting the connection, so we clear the previous existing data we + # used to track it. We do this here to ensure that if we get lots of + # one-shot requests we don't stack up lots of entries. We have `ON DELETE + # CASCADE` setup on the dependent tables so this will clear out all the + # associated data. + self.db_pool.simple_delete_txn( + txn, + table="sliding_sync_connections", + keyvalues={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + }, + ) + + (connection_key,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connections", + values={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_key",), + ) + + # Define a new connection position for the updates + (connection_position,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_positions", + values={ + "connection_key": connection_key, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_position",), + ) + + # We need to deduplicate the `required_state` JSON. We do this by + # fetching all JSON associated with the connection and comparing that + # with the updates to `required_state` + + # Dict from required state json -> required state ID + required_state_to_id: Dict[str, int] = {} + if previous_connection_position is not None: + rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_required_state", + keyvalues={"connection_key": connection_key}, + retcols=("required_state_id", "required_state"), + ) + for required_state_id, required_state in rows: + required_state_to_id[required_state] = required_state_id + + room_to_state_ids: Dict[str, int] = {} + unique_required_state: Dict[str, List[str]] = {} + for room_id, room_state in per_connection_state.room_configs.items(): + serialized_state = json_encoder.encode( + # We store the required state as a sorted list of event type / + # state key tuples. 
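+                # For example (illustrative values only), a room requiring the
+                # room name plus one member's state would serialize to:
+                #   [["m.room.member", "@user:example.org"], ["m.room.name", ""]]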
+ sorted( + (event_type, state_key) + for event_type, state_keys in room_state.required_state_map.items() + for state_key in state_keys + ) + ) + + existing_state_id = required_state_to_id.get(serialized_state) + if existing_state_id is not None: + room_to_state_ids[room_id] = existing_state_id + else: + unique_required_state.setdefault(serialized_state, []).append(room_id) + + # Insert any new `required_state` json we haven't previously seen. + for serialized_required_state, room_ids in unique_required_state.items(): + (required_state_id,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_required_state", + values={ + "connection_key": connection_key, + "required_state": serialized_required_state, + }, + returning=("required_state_id",), + ) + for room_id in room_ids: + room_to_state_ids[room_id] = required_state_id + + # Copy over state from the previous connection position (we'll overwrite + # these rows with any changes). + if previous_connection_position is not None: + sql = """ + INSERT INTO sliding_sync_connection_streams + (connection_position, stream, room_id, room_status, last_token) + SELECT ?, stream, room_id, room_status, last_token + FROM sliding_sync_connection_streams + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + sql = """ + INSERT INTO sliding_sync_connection_room_configs + (connection_position, room_id, timeline_limit, required_state_id) + SELECT ?, room_id, timeline_limit, required_state_id + FROM sliding_sync_connection_room_configs + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + # We now upsert the changes to the various streams. + key_values = [] + value_values = [] + for room_id, have_sent_room in per_connection_state.rooms._statuses.items(): + key_values.append((connection_position, "rooms", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + for room_id, have_sent_room in per_connection_state.receipts._statuses.items(): + key_values.append((connection_position, "receipts", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + self.db_pool.simple_upsert_many_txn( + txn, + table="sliding_sync_connection_streams", + key_names=( + "connection_position", + "stream", + "room_id", + ), + key_values=key_values, + value_names=( + "room_status", + "last_token", + ), + value_values=value_values, + ) + + # ... and upsert changes to the room configs. 
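+        # (Every room in `room_configs` now has an entry in `room_to_state_ids`,
+        # pointing either at a pre-existing `required_state` row or at one of the
+        # rows inserted above.)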
+        keys = []
+        values = []
+        for room_id, room_config in per_connection_state.room_configs.items():
+            keys.append((connection_position, room_id))
+            values.append((room_config.timeline_limit, room_to_state_ids[room_id]))
+
+        self.db_pool.simple_upsert_many_txn(
+            txn,
+            table="sliding_sync_connection_room_configs",
+            key_names=(
+                "connection_position",
+                "room_id",
+            ),
+            key_values=keys,
+            value_names=(
+                "timeline_limit",
+                "required_state_id",
+            ),
+            value_values=values,
+        )
+
+        return connection_position
+
+    @cached(iterable=True, max_entries=100000)
+    async def get_and_clear_connection_positions(
+        self, user_id: str, device_id: str, conn_id: str, connection_position: int
+    ) -> "PerConnectionState":
+        """Get the per-connection state for the given connection position."""
+
+        per_connection_state_db = await self.db_pool.runInteraction(
+            "get_and_clear_connection_positions",
+            self._get_and_clear_connection_positions_txn,
+            user_id=user_id,
+            device_id=device_id,
+            conn_id=conn_id,
+            connection_position=connection_position,
+        )
+
+        # This cast is safe because the downstream code only cares about
+        # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed
+        # alongside `SlidingSyncStore` wherever we create a store.
+        store = cast("DataStore", self)
+
+        return await per_connection_state_db.to_state(store)
+
+    def _get_and_clear_connection_positions_txn(
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_id: str,
+        conn_id: str,
+        connection_position: int,
+    ) -> "PerConnectionStateDB":
+        # The `previous_connection_position` is a user-supplied value, so we
+        # need to make sure that the one they supplied is actually theirs.
+        sql = """
+            SELECT connection_key
+            FROM sliding_sync_connection_positions
+            INNER JOIN sliding_sync_connections USING (connection_key)
+            WHERE
+                connection_position = ?
+                AND user_id = ? AND effective_device_id = ? AND conn_id = ?
+        """
+        txn.execute(sql, (connection_position, user_id, device_id, conn_id))
+        row = txn.fetchone()
+        if row is None:
+            raise SlidingSyncUnknownPosition()
+
+        (connection_key,) = row
+
+        # Now that we have seen the client has received and used the connection
+        # position, we can delete all the other connection positions.
+        sql = """
+            DELETE FROM sliding_sync_connection_positions
+            WHERE connection_key = ? AND connection_position != ?
+        """
+        txn.execute(sql, (connection_key, connection_position))
+
+        # Fetch and create a mapping from required state ID to the actual
+        # required state for the connection.
+        rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_required_state",
+            keyvalues={"connection_key": connection_key},
+            retcols=(
+                "required_state_id",
+                "required_state",
+            ),
+        )
+
+        required_state_map: Dict[int, Dict[str, Set[str]]] = {}
+        for row in rows:
+            state = required_state_map[row[0]] = {}
+            for event_type, state_key in db_to_json(row[1]):
+                state.setdefault(event_type, set()).add(state_key)
+
+        # Get all the room configs, looking up the required state from the map
+        # above.
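+        # (These rows only hold a `required_state_id` pointer; rows for this
+        # position include any copied forward from the previous position.)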
+ room_config_rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_room_configs", + keyvalues={"connection_position": connection_position}, + retcols=( + "room_id", + "timeline_limit", + "required_state_id", + ), + ) + + room_configs: Dict[str, RoomSyncConfig] = {} + for ( + room_id, + timeline_limit, + required_state_id, + ) in room_config_rows: + room_configs[room_id] = RoomSyncConfig( + timeline_limit=timeline_limit, + required_state_map=required_state_map[required_state_id], + ) + + # Now look up the per-room stream data. + rooms: Dict[str, HaveSentRoom[str]] = {} + receipts: Dict[str, HaveSentRoom[str]] = {} + + receipt_rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_streams", + keyvalues={"connection_position": connection_position}, + retcols=( + "stream", + "room_id", + "room_status", + "last_token", + ), + ) + for stream, room_id, room_status, last_token in receipt_rows: + have_sent_room: HaveSentRoom[str] = HaveSentRoom( + status=HaveSentRoomFlag(room_status), last_token=last_token + ) + if stream == "rooms": + rooms[room_id] = have_sent_room + elif stream == "receipts": + receipts[room_id] = have_sent_room + else: + # For forwards compatibility we ignore unknown streams, as in + # future we want to be able to easily add more stream types. + logger.warning("Unrecognized sliding sync stream in DB %r", stream) + + return PerConnectionStateDB( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + room_configs=room_configs, + ) + + +@attr.s(auto_attribs=True, frozen=True) +class PerConnectionStateDB: + """An equivalent to `PerConnectionState` that holds data in a format stored + in the DB. + + The principle difference is that the tokens for the different streams are + serialized to strings. + + When persisting this *only* contains updates to the state. 
+ """ + + rooms: "RoomStatusMap[str]" + receipts: "RoomStatusMap[str]" + + room_configs: Mapping[str, "RoomSyncConfig"] + + @staticmethod + async def from_state( + per_connection_state: "MutablePerConnectionState", store: "DataStore" + ) -> "PerConnectionStateDB": + """Convert from a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.rooms.get_updates().items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.receipts.get_updates().items() + } + + log_kv( + { + "rooms": rooms, + "receipts": receipts, + "room_configs": per_connection_state.room_configs.maps[0], + } + ) + + return PerConnectionStateDB( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + room_configs=per_connection_state.room_configs.maps[0], + ) + + async def to_state(self, store: "DataStore") -> "PerConnectionState": + """Convert into a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await RoomStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.rooms._statuses.items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await MultiWriterStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.receipts._statuses.items() + } + + return PerConnectionState( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + room_configs=self.room_configs, + ) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index ad222e7e2d..9d82c59384 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -28,6 +28,11 @@ if TYPE_CHECKING: from synapse.storage.database import LoggingDatabaseConnection +# A string that will be replaced with the appropriate auto increment directive +# for the database engine, expands to an auto incrementing integer primary key. +AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$" + + class IsolationLevel(IntEnum): READ_COMMITTED: int = 1 REPEATABLE_READ: int = 2 diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 90641d5a18..8c8c6d0414 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast import psycopg2.extensions from synapse.storage.engines._base import ( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, BaseDatabaseEngine, IncorrectDatabaseSetup, IsolationLevel, @@ -256,4 +257,10 @@ class PostgresEngine( executing the script in its own transaction. The script transaction is left open and it is the responsibility of the caller to commit it. 
""" + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, + "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY", + ) + cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index b11094c5c1..9d1795ebe5 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -25,6 +25,7 @@ import threading from typing import TYPE_CHECKING, Any, List, Mapping, Optional from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER from synapse.storage.types import Cursor if TYPE_CHECKING: @@ -168,6 +169,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): > first. No other implicit transaction control is performed; any transaction > control must be added to sql_script. """ + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT" + ) + # The implementation of `executescript` can be found at # https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035. cursor.executescript(f"BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 316541d818..d8afa6da02 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -146,6 +146,9 @@ Changes in SCHEMA_VERSION = 86 Changes in SCHEMA_VERSION = 87 - Add tables to store Sliding Sync data for quick filtering/sorting (`sliding_sync_joined_rooms`, `sliding_sync_membership_snapshots`) + - Add tables for storing the per-connection state for sliding sync requests: + sliding_sync_connections, sliding_sync_connection_positions, sliding_sync_connection_required_state, + sliding_sync_connection_room_configs, sliding_sync_connection_streams """ diff --git a/synapse/storage/schema/main/delta/87/02_per_connection_state.sql b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql new file mode 100644 index 0000000000..59bc14a2c9 --- /dev/null +++ b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql @@ -0,0 +1,81 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + + +-- Table to track active sliding sync connections. +-- +-- A new connection will be created for every sliding sync request without a +-- `since` token for a given `conn_id` for a device.# +-- +-- Once a new connection is created and used we delete all other connections for +-- the `conn_id`. +CREATE TABLE sliding_sync_connections( + connection_key $%AUTO_INCREMENT_PRIMARY_KEY%$, + user_id TEXT NOT NULL, + -- Generally the device ID, but may be something else for e.g. puppeted accounts. 
+    effective_device_id TEXT NOT NULL,
+    conn_id TEXT NOT NULL,
+    created_ts BIGINT NOT NULL
+);
+
+CREATE INDEX sliding_sync_connections_idx ON sliding_sync_connections(user_id, effective_device_id, conn_id);
+CREATE INDEX sliding_sync_connections_ts_idx ON sliding_sync_connections(created_ts);
+
+-- We track per-connection state by associating changes to the state with
+-- connection positions. This ensures that we correctly track state even if we
+-- see retries of requests.
+--
+-- If the client starts a "new" connection (by not specifying a since token),
+-- we'll clear out the other connections (to ensure that we don't end up with
+-- lots of connection keys).
+CREATE TABLE sliding_sync_connection_positions(
+    connection_position $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
+    created_ts BIGINT NOT NULL
+);
+
+CREATE INDEX sliding_sync_connection_positions_key ON sliding_sync_connection_positions(connection_key);
+CREATE INDEX sliding_sync_connection_positions_ts_idx ON sliding_sync_connection_positions(created_ts);
+
+
+-- To save space we deduplicate the `required_state` json by assigning IDs to
+-- different values.
+CREATE TABLE sliding_sync_connection_required_state(
+    required_state_id $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
+    required_state TEXT NOT NULL  -- We store this as a json list of event type / state key tuples.
+);
+
+CREATE INDEX sliding_sync_connection_required_state_conn_pos ON sliding_sync_connection_required_state(connection_key);
+
+
+-- Stores the room configs we have seen for rooms in a connection.
+CREATE TABLE sliding_sync_connection_room_configs(
+    connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
+    room_id TEXT NOT NULL,
+    timeline_limit BIGINT NOT NULL,
+    required_state_id BIGINT NOT NULL REFERENCES sliding_sync_connection_required_state(required_state_id)
+);
+
+CREATE UNIQUE INDEX sliding_sync_connection_room_configs_idx ON sliding_sync_connection_room_configs(connection_position, room_id);
+
+-- Stores what data we have sent for given streams down given connections.
+CREATE TABLE sliding_sync_connection_streams(
+    connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
+    stream TEXT NOT NULL,  -- e.g. "rooms" or "receipts"
+    room_id TEXT NOT NULL,
+    room_status TEXT NOT NULL,  -- "live" or "previously", i.e. the `HaveSentRoomFlag` value
+    last_token TEXT  -- For "previously" the token for the stream we have sent up to.
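+    -- NULL when there is no stream token to resume from (e.g. the room is
+    -- still "live").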
+); + +CREATE UNIQUE INDEX sliding_sync_connection_streams_idx ON sliding_sync_connection_streams(connection_position, room_id, stream); diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index bca1ff7b54..84a88bf784 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -730,6 +730,9 @@ class RoomStatusMap(Generic[T]): return RoomStatusMap(statuses=dict(self._statuses)) + def __len__(self) -> int: + return len(self._statuses) + class MutableRoomStatusMap(RoomStatusMap[T]): """A mutable version of `RoomStatusMap`""" @@ -831,6 +834,9 @@ class PerConnectionState: room_configs=dict(self.room_configs), ) + def __len__(self) -> int: + return len(self.rooms) + len(self.receipts) + len(self.room_configs) + @attr.s(auto_attribs=True) class MutablePerConnectionState(PerConnectionState): diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 823e7db569..498c921cbd 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -191,8 +191,14 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): } _, from_token = self.do_sync(sync_body, tok=user1_tok) - # Reset the in-memory cache - self.hs.get_sliding_sync_handler().connection_store._connections.clear() + # Reset the positions + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_connections", + keyvalues={"user_id": user1_id}, + desc="clear_sliding_sync_connections_cache", + ) + ) # Make the Sliding Sync request channel = self.make_request( From bb80894391c5d62ffdf1e6b47e6ea22f264e1124 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 16:58:53 +0100 Subject: [PATCH 171/332] Fix background update for sliding sync (#17631) This reverts commit ab414f2ab8a294fbffb417003eeea0f14bbd6588. Introduced in https://github.com/element-hq/synapse/pull/17599 --- changelog.d/17631.misc | 1 + .../databases/main/events_bg_updates.py | 31 ++++++++++++------- tests/storage/test_sliding_sync_tables.py | 10 +++--- 3 files changed, 24 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17631.misc diff --git a/changelog.d/17631.misc b/changelog.d/17631.misc new file mode 100644 index 0000000000..2f81356d12 --- /dev/null +++ b/changelog.d/17631.misc @@ -0,0 +1 @@ +Store sliding sync per-connection state in the database. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 88ff5aa2df..3160e12bb3 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1961,27 +1961,33 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return 0 def _find_previous_membership_txn( - txn: LoggingTransaction, event_id: str, user_id: str + txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int ) -> Tuple[str, str]: - # Find the previous invite/knock event before the leave event. This - # is done by looking at the auth events of the invite/knock and - # finding the corresponding membership event. + # Find the previous invite/knock event before the leave event txn.execute( """ - SELECT m.event_id, m.membership - FROM event_auth AS a - INNER JOIN room_memberships AS m ON (a.auth_id = m.event_id) - WHERE a.event_id = ? AND m.user_id = ? 
+ SELECT event_id, membership + FROM room_memberships + WHERE + room_id = ? + AND user_id = ? + AND event_stream_ordering < ? + ORDER BY event_stream_ordering DESC + LIMIT 1 """, - (event_id, user_id), + ( + room_id, + user_id, + stream_ordering, + ), ) row = txn.fetchone() # We should see a corresponding previous invite/knock event assert row is not None - previous_event_id, membership = row + event_id, membership = row - return previous_event_id, membership + return event_id, membership # Map from (room_id, user_id) to ... to_insert_membership_snapshots: Dict[ @@ -2097,8 +2103,9 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update._find_previous_membership", _find_previous_membership_txn, - membership_event_id, + room_id, user_id, + membership_event_stream_ordering, ) ) diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index 4be098b6f6..d0bbc1c803 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -270,7 +270,9 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): return invite_room_id, persisted_event def _retract_remote_invite_for_user( - self, user_id: str, remote_room_id: str, invite_event_id: str + self, + user_id: str, + remote_room_id: str, ) -> EventBase: """ Create a fake invite retraction for a remote room and persist it. @@ -283,7 +285,6 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): user_id: The person who was invited and we're going to retract the invite for. remote_room_id: The room ID that the invite was for. - invite_event_id: The event ID of the invite Returns: The persisted leave (kick) event. @@ -297,7 +298,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): "origin_server_ts": 1, "type": EventTypes.Member, "content": {"membership": Membership.LEAVE}, - "auth_events": [invite_event_id], + "auth_events": [], "prev_events": [], } @@ -2346,7 +2347,6 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): remote_invite_retraction_event = self._retract_remote_invite_for_user( user_id=user1_id, remote_room_id=remote_invite_room_id, - invite_event_id=remote_invite_event.event_id, ) # No one local is joined to the remote room @@ -3580,7 +3580,6 @@ class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase): room_id_no_info_leave_event = self._retract_remote_invite_for_user( user_id=user1_id, remote_room_id=room_id_no_info, - invite_event_id=room_id_no_info_invite_event.event_id, ) room_id_with_info_leave_event_response = self.helper.leave( room_id_with_info, user1_id, tok=user1_tok @@ -3588,7 +3587,6 @@ class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase): space_room_id_leave_event = self._retract_remote_invite_for_user( user_id=user1_id, remote_room_id=space_room_id, - invite_event_id=space_room_id_invite_event.event_id, ) # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not From d844afdc2970f772589b09e615c757d25645fda9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 19:16:39 +0100 Subject: [PATCH 172/332] Fix background update for sliding sync (find previous membership) (#17632) This reverts commit https://github.com/element-hq/synapse/commit/ab414f2ab8a294fbffb417003eeea0f14bbd6588. 
Introduced in https://github.com/element-hq/synapse/pull/17512

---
 changelog.d/17632.misc                              | 1 +
 synapse/storage/databases/main/events_bg_updates.py | 9 +++++----
 2 files changed, 6 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/17632.misc

diff --git a/changelog.d/17632.misc b/changelog.d/17632.misc
new file mode 100644
index 0000000000..756918e2b2
--- /dev/null
+++ b/changelog.d/17632.misc
@@ -0,0 +1 @@
+Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting.

diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 3160e12bb3..b86f873eba 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -1967,12 +1967,13 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
             txn.execute(
                 """
                 SELECT event_id, membership
-                FROM room_memberships
+                FROM room_memberships AS m
+                INNER JOIN events AS e USING (room_id, event_id)
                 WHERE
                     room_id = ?
-                    AND user_id = ?
-                    AND event_stream_ordering < ?
-                ORDER BY event_stream_ordering DESC
+                    AND m.user_id = ?
+                    AND e.stream_ordering < ?
+                ORDER BY e.stream_ordering DESC
                 LIMIT 1
                 """,
                 (

From 26f81fb5be5c090af70986815a2795ea0713eede Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 30 Aug 2024 02:53:57 -0500
Subject: [PATCH 173/332] Sliding Sync: Fix outlier re-persisting causing
 problems with sliding sync tables (#17635)

Fix outlier re-persisting causing problems with sliding sync tables

Follow-up to https://github.com/element-hq/synapse/pull/17512

When running on `matrix.org`, we discovered that a remote invite is
first persisted as an `outlier` and then re-persisted again where it is
de-outliered. The first time, the `outlier` is persisted with one
`stream_ordering` but when persisted again and de-outliered, it is
assigned a different `stream_ordering` that won't end up being used.
Since we call `_calculate_sliding_sync_table_changes()` before
`_update_outliers_txn()` which fixes this discrepancy (always use the
`stream_ordering` from the first time it was persisted), we're working
with an unreliable `stream_ordering` value that will possibly be unused
and not make it into the `events` table.

---
 changelog.d/17635.misc                        |   1 +
 synapse/api/constants.py                      |   2 +
 synapse/storage/databases/main/events.py      | 243 +++++++++---------
 .../databases/main/events_bg_updates.py       |  28 +-
 tests/storage/test_sliding_sync_tables.py     | 123 +++++++++
 5 files changed, 263 insertions(+), 134 deletions(-)
 create mode 100644 changelog.d/17635.misc

diff --git a/changelog.d/17635.misc b/changelog.d/17635.misc
new file mode 100644
index 0000000000..756918e2b2
--- /dev/null
+++ b/changelog.d/17635.misc
@@ -0,0 +1 @@
+Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting.

diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 8e3b404aed..8db302b3d8 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -230,6 +230,8 @@ class EventContentFields:
     ROOM_NAME: Final = "name"
 
+    MEMBERSHIP: Final = "membership"
+
     # Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access" diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 60c92e5804..f3dbe5bba7 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -163,6 +163,15 @@ class SlidingSyncMembershipInfo: sender: str membership_event_id: str membership: str + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncMembershipInfoWithEventPos(SlidingSyncMembershipInfo): + """ + SlidingSyncMembershipInfo + `stream_ordering`/`instance_name` of the membership + event + """ + membership_event_stream_ordering: int membership_event_instance_name: str @@ -170,17 +179,6 @@ class SlidingSyncMembershipInfo: @attr.s(slots=True, auto_attribs=True) class SlidingSyncTableChanges: room_id: str - # `stream_ordering` of the most recent event being persisted in the room. This doesn't - # need to be perfect, we just need *some* answer that points to a real event in the - # room in case we are the first ones inserting into the `sliding_sync_joined_rooms` - # table because of the `NON NULL` constraint on `event_stream_ordering`. In reality, - # `_update_sliding_sync_tables_with_new_persisted_events_txn()` is run after - # `_update_current_state_txn()` whenever a new event is persisted to update it to the - # correct latest value. - # - # This should be *some* value that points to a real event in the room if we are - # still joined to the room and some state is changing (`to_insert` or `to_delete`). - joined_room_best_effort_most_recent_stream_ordering: Optional[int] # If the row doesn't exist in the `sliding_sync_joined_rooms` table, we need to # fully-insert it which means we also need to include a `bump_stamp` value to use # for the row. This should only be populated when we're trying to fully-insert a @@ -401,6 +399,9 @@ class PersistEventsStore: `stream_ordering`). delta_state: Deltas that are going to be used to update the `current_state_events` table. Changes to the current state of the room. + + Returns: + SlidingSyncTableChanges """ to_insert = delta_state.to_insert to_delete = delta_state.to_delete @@ -410,7 +411,6 @@ class PersistEventsStore: if not to_insert and not to_delete: return SlidingSyncTableChanges( room_id=room_id, - joined_room_best_effort_most_recent_stream_ordering=None, joined_room_bump_stamp_to_fully_insert=None, joined_room_updates={}, membership_snapshot_shared_insert_values={}, @@ -469,24 +469,24 @@ class PersistEventsStore: membership_event_id, user_id, ) in membership_event_id_to_user_id_map.items(): - # We should only be seeing events with `stream_ordering`/`instance_name` assigned by this point - membership_event_stream_ordering = membership_event_map[ - membership_event_id - ].internal_metadata.stream_ordering - assert membership_event_stream_ordering is not None - membership_event_instance_name = membership_event_map[ - membership_event_id - ].internal_metadata.instance_name - assert membership_event_instance_name is not None - membership_infos_to_insert_membership_snapshots.append( + # XXX: We don't use `SlidingSyncMembershipInfoWithEventPos` here + # because we're sourcing the event from `events_and_contexts`, we + # can't rely on `stream_ordering`/`instance_name` being correct. We + # could be working with events that were previously persisted as an + # `outlier` with one `stream_ordering` but are now being persisted + # again and de-outliered and assigned a different `stream_ordering` + # that won't end up being used. 
Since we call + # `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use + # the `stream_ordering` from the first time it was persisted), we're + # working with an unreliable `stream_ordering` value that will + # possibly be unused and not make it into the `events` table. SlidingSyncMembershipInfo( user_id=user_id, sender=membership_event_map[membership_event_id].sender, membership_event_id=membership_event_id, membership=membership_event_map[membership_event_id].membership, - membership_event_stream_ordering=membership_event_stream_ordering, - membership_event_instance_name=membership_event_instance_name, ) ) @@ -568,7 +568,6 @@ class PersistEventsStore: # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) # joined_room_updates: SlidingSyncStateInsertValues = {} - best_effort_most_recent_stream_ordering: Optional[int] = None bump_stamp_to_fully_insert: Optional[int] = None if not delta_state.no_longer_in_room: current_state_ids_map = {} @@ -632,9 +631,7 @@ class PersistEventsStore: # Otherwise, we need to find a couple events that we were reset to. if missing_event_ids: - remaining_events = await self.store.get_events( - current_state_ids_map.values() - ) + remaining_events = await self.store.get_events(missing_event_ids) # There shouldn't be any missing events assert ( remaining_events.keys() == missing_event_ids @@ -657,52 +654,9 @@ class PersistEventsStore: elif state_key == (EventTypes.Name, ""): joined_room_updates["room_name"] = None - # Figure out `best_effort_most_recent_stream_ordering`. This doesn't need to - # be perfect, we just need *some* answer that points to a real event in the - # room in case we are the first ones inserting into the - # `sliding_sync_joined_rooms` table because of the `NON NULL` constraint on - # `event_stream_ordering`. In reality, - # `_update_sliding_sync_tables_with_new_persisted_events_txn()` is run after - # `_update_current_state_txn()` whenever a new event is persisted to update - # it to the correct latest value. - # - if len(events_and_contexts) > 0: - # Since the list is sorted ascending by `stream_ordering`, the last event - # should have the highest `stream_ordering`. - best_effort_most_recent_stream_ordering = events_and_contexts[-1][ - 0 - ].internal_metadata.stream_ordering - else: - # If there are no `events_and_contexts`, we assume it's one of two scenarios: - # 1. If there are new state `to_insert` but no `events_and_contexts`, - # then it's a state reset. - # 2. Otherwise, it's some partial-state room re-syncing the current state and - # going through un-partial process. - # - # Either way, we assume no new events are being persisted and we can - # find the latest already in the database. Since this is a best-effort - # value, we don't need to be perfect although I think we're pretty close - # here. - most_recent_event_pos_results = ( - await self.store.get_last_event_pos_in_room( - room_id, event_types=None - ) - ) - assert most_recent_event_pos_results, ( - f"We should not be seeing `None` here because we are still in the room ({room_id}) and " - + "it should at-least have a join membership event that's keeping us here." 
- ) - best_effort_most_recent_stream_ordering = most_recent_event_pos_results[ - 1 - ].stream - - # We should have found a value if we are still in the room - assert best_effort_most_recent_stream_ordering is not None - return SlidingSyncTableChanges( room_id=room_id, # For `sliding_sync_joined_rooms` - joined_room_best_effort_most_recent_stream_ordering=best_effort_most_recent_stream_ordering, joined_room_bump_stamp_to_fully_insert=bump_stamp_to_fully_insert, joined_room_updates=joined_room_updates, # For `sliding_sync_membership_snapshots` @@ -1773,31 +1727,53 @@ class PersistEventsStore: # # We only need to update when one of the relevant state values has changed if sliding_sync_table_changes.joined_room_updates: - # This should be *some* value that points to a real event in the room if - # we are still joined to the room. - assert ( - sliding_sync_table_changes.joined_room_best_effort_most_recent_stream_ordering - is not None + sliding_sync_updates_keys = ( + sliding_sync_table_changes.joined_room_updates.keys() + ) + sliding_sync_updates_values = ( + sliding_sync_table_changes.joined_room_updates.values() ) - self.db_pool.simple_upsert_txn( - txn, - table="sliding_sync_joined_rooms", - keyvalues={"room_id": room_id}, - values=sliding_sync_table_changes.joined_room_updates, - insertion_values={ - # The reason we're only *inserting* (not *updating*) - # `event_stream_ordering` here is because the column has a `NON - # NULL` constraint and we need *some* answer. And if the row - # already exists, it already has the correct value and it's - # better to just rely on - # `_update_sliding_sync_tables_with_new_persisted_events_txn()` - # to do the right thing (same for `bump_stamp`). - "event_stream_ordering": sliding_sync_table_changes.joined_room_best_effort_most_recent_stream_ordering, - # If we're trying to fully-insert a row, we need to provide a - # value for `bump_stamp` if it exists for the room. - "bump_stamp": sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert, - }, + args: List[Any] = [ + room_id, + room_id, + sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert, + ] + args.extend(iter(sliding_sync_updates_values)) + + # XXX: We use a sub-query for `stream_ordering` because it's unreliable to + # pre-calculate from `events_and_contexts` at the time when + # `_calculate_sliding_sync_table_changes()` is ran. We could be working + # with events that were previously persisted as an `outlier` with one + # `stream_ordering` but are now being persisted again and de-outliered + # and assigned a different `stream_ordering`. Since we call + # `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use the + # `stream_ordering` from the first time it was persisted), we're working + # with an unreliable `stream_ordering` value that will possibly be + # unused and not make it into the `events` table. + # + # We don't update `event_stream_ordering` `ON CONFLICT` because it's + # simpler and we can just rely on + # `_update_sliding_sync_tables_with_new_persisted_events_txn()` to do + # the right thing (same for `bump_stamp`). The only reason we're + # inserting `event_stream_ordering` here is because the column has a + # `NON NULL` constraint and we need some answer. + txn.execute( + f""" + INSERT INTO sliding_sync_joined_rooms + (room_id, event_stream_ordering, bump_stamp, {", ".join(sliding_sync_updates_keys)}) + VALUES ( + ?, + (SELECT stream_ordering FROM events WHERE room_id = ? 
ORDER BY stream_ordering DESC LIMIT 1), + ?, + {", ".join("?" for _ in sliding_sync_updates_values)} + ) + ON CONFLICT (room_id) + DO UPDATE SET + {", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_updates_keys)} + """, + args, ) # We now update `local_current_membership`. We do this regardless @@ -1854,38 +1830,63 @@ class PersistEventsStore: if sliding_sync_table_changes.to_insert_membership_snapshots: # Update the `sliding_sync_membership_snapshots` table # - # We need to insert/update regardless of whether we have `sliding_sync_snapshot_keys` - # because there are other fields in the `ON CONFLICT` upsert to run (see - # inherit case above for more context when this happens). - self.db_pool.simple_upsert_many_txn( - txn=txn, - table="sliding_sync_membership_snapshots", - key_names=("room_id", "user_id"), - key_values=[ - (room_id, membership_info.user_id) - for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots - ], - value_names=[ - "sender", - "membership_event_id", - "membership", - "event_stream_ordering", - "event_instance_name", - ] - + list( - sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() - ), - value_values=[ + sliding_sync_snapshot_keys = ( + sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() + ) + sliding_sync_snapshot_values = ( + sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() + ) + # We need to insert/update regardless of whether we have + # `sliding_sync_snapshot_keys` because there are other fields in the `ON + # CONFLICT` upsert to run (see inherit case (explained in + # `_calculate_sliding_sync_table_changes()`) for more context when this + # happens). + # + # XXX: We use a sub-query for `stream_ordering` because it's unreliable to + # pre-calculate from `events_and_contexts` at the time when + # `_calculate_sliding_sync_table_changes()` is ran. We could be working with + # events that were previously persisted as an `outlier` with one + # `stream_ordering` but are now being persisted again and de-outliered and + # assigned a different `stream_ordering` that won't end up being used. Since + # we call `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use the + # `stream_ordering` from the first time it was persisted), we're working + # with an unreliable `stream_ordering` value that will possibly be unused + # and not make it into the `events` table. + txn.execute_batch( + f""" + INSERT INTO sliding_sync_membership_snapshots + (room_id, user_id, sender, membership_event_id, membership, event_stream_ordering, event_instance_name + {("," + ", ".join(sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""}) + VALUES ( + ?, ?, ?, ?, ?, + (SELECT stream_ordering FROM events WHERE event_id = ?), + (SELECT instance_name FROM events WHERE event_id = ?) + {("," + ", ".join("?" 
for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""} + ) + ON CONFLICT (room_id, user_id) + DO UPDATE SET + sender = EXCLUDED.sender, + membership_event_id = EXCLUDED.membership_event_id, + membership = EXCLUDED.membership, + event_stream_ordering = EXCLUDED.event_stream_ordering + {("," + ", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""} + """, + [ [ + room_id, + membership_info.user_id, membership_info.sender, membership_info.membership_event_id, membership_info.membership, - membership_info.membership_event_stream_ordering, - membership_info.membership_event_instance_name, + # XXX: We do not use `membership_info.membership_event_stream_ordering` here + # because it is an unreliable value. See XXX note above. + membership_info.membership_event_id, + # XXX: We do not use `membership_info.membership_event_instance_name` here + # because it is an unreliable value. See XXX note above. + membership_info.membership_event_id, ] - + list( - sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() - ) + + list(sliding_sync_snapshot_values) for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots ], ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b86f873eba..49ca985c4d 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -37,7 +37,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events import ( SLIDING_SYNC_RELEVANT_STATE_SET, PersistEventsStore, - SlidingSyncMembershipInfo, + SlidingSyncMembershipInfoWithEventPos, SlidingSyncMembershipSnapshotSharedInsertValues, SlidingSyncStateInsertValues, ) @@ -1994,9 +1994,9 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS to_insert_membership_snapshots: Dict[ Tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues ] = {} - to_insert_membership_infos: Dict[Tuple[str, str], SlidingSyncMembershipInfo] = ( - {} - ) + to_insert_membership_infos: Dict[ + Tuple[str, str], SlidingSyncMembershipInfoWithEventPos + ] = {} for ( room_id, room_id_from_rooms_table, @@ -2185,15 +2185,17 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS to_insert_membership_snapshots[(room_id, user_id)] = ( sliding_sync_membership_snapshots_insert_map ) - to_insert_membership_infos[(room_id, user_id)] = SlidingSyncMembershipInfo( - user_id=user_id, - sender=sender, - membership_event_id=membership_event_id, - membership=membership, - membership_event_stream_ordering=membership_event_stream_ordering, - # If instance_name is null we default to "master" - membership_event_instance_name=membership_event_instance_name - or "master", + to_insert_membership_infos[(room_id, user_id)] = ( + SlidingSyncMembershipInfoWithEventPos( + user_id=user_id, + sender=sender, + membership_event_id=membership_event_id, + membership=membership, + membership_event_stream_ordering=membership_event_stream_ordering, + # If instance_name is null we default to "master" + membership_event_instance_name=membership_event_instance_name + or "master", + ) ) def _fill_table_txn(txn: LoggingTransaction) -> None: diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index d0bbc1c803..621f46fff8 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ 
b/tests/storage/test_sliding_sync_tables.py
@@ -38,6 +38,7 @@ from synapse.storage.databases.main.events_bg_updates import (
     _resolve_stale_data_in_sliding_sync_joined_rooms_table,
     _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
 )
+from synapse.types import create_requester
 from synapse.util import Clock

 from tests.test_utils.event_injection import create_event
@@ -925,6 +926,128 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
             user2_snapshot,
         )

+    @parameterized.expand(
+        # Test both an insert and an upsert into the
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` to exercise
+        # more possibilities of things going wrong.
+        [
+            ("insert", True),
+            ("upsert", False),
+        ]
+    )
+    def test_joined_room_outlier_and_deoutlier(
+        self, description: str, should_insert: bool
+    ) -> None:
+        """
+        This is a regression test.
+
+        This is to simulate the case where an event is first persisted as an outlier
+        (like a remote invite) and then later persisted again to de-outlier it. The
+        first time, the `outlier` is persisted with one `stream_ordering` but when
+        persisted again and de-outliered, it is assigned a different `stream_ordering`
+        that won't end up being used. Since we call
+        `_calculate_sliding_sync_table_changes()` before `_update_outliers_txn()` which
+        fixes this discrepancy (always use the `stream_ordering` from the first time it
+        was persisted), make sure we're not using unreliable `stream_ordering` values
+        that will cause `FOREIGN KEY constraint failed` in the
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_version = RoomVersions.V10
+        room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, room_version=room_version.identifier
+        )
+
+        if should_insert:
+            # Clear these out so we always insert
+            self.get_success(
+                self.store.db_pool.simple_delete(
+                    table="sliding_sync_joined_rooms",
+                    keyvalues={"room_id": room_id},
+                    desc="TODO",
+                )
+            )
+            self.get_success(
+                self.store.db_pool.simple_delete(
+                    table="sliding_sync_membership_snapshots",
+                    keyvalues={"room_id": room_id},
+                    desc="TODO",
+                )
+            )
+
+        # Create a membership event (which triggers an insert into
+        # `sliding_sync_membership_snapshots`)
+        membership_event_dict = {
+            "type": EventTypes.Member,
+            "state_key": user1_id,
+            "sender": user1_id,
+            "room_id": room_id,
+            "content": {EventContentFields.MEMBERSHIP: Membership.JOIN},
+        }
+        # Create a relevant state event (which triggers an insert into
+        # `sliding_sync_joined_rooms`)
+        state_event_dict = {
+            "type": EventTypes.Name,
+            "state_key": "",
+            "sender": user2_id,
+            "room_id": room_id,
+            "content": {EventContentFields.ROOM_NAME: "my super room"},
+        }
+        event_dicts_to_persist = [
+            membership_event_dict,
+            state_event_dict,
+        ]
+
+        for event_dict in event_dicts_to_persist:
+            events_to_persist = []
+
+            # Create the event as an outlier
+            (
+                event,
+                unpersisted_context,
+            ) = self.get_success(
+                self.hs.get_event_creation_handler().create_event(
+                    requester=create_requester(user1_id),
+                    event_dict=event_dict,
+                    outlier=True,
+                )
+            )
+            # FIXME: Should we use an `EventContext.for_outlier(...)` here?
+            # Doesn't seem to matter for this test.
+            context = self.get_success(unpersisted_context.persist(event))
+            events_to_persist.append((event, context))
+
+            # Create the event again but as a non-outlier.
This will de-outlier the event + # when we persist it. + ( + event, + unpersisted_context, + ) = self.get_success( + self.hs.get_event_creation_handler().create_event( + requester=create_requester(user1_id), + event_dict=event_dict, + outlier=False, + ) + ) + context = self.get_success(unpersisted_context.persist(event)) + events_to_persist.append((event, context)) + + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + for event, context in events_to_persist: + self.get_success( + persist_controller.persist_event( + event, + context, + ) + ) + + # We're just testing that it does not explode + def test_joined_room_meta_state_reset(self) -> None: """ Test that a state reset on the room name is reflected in the From 7098d47f29d38daa5089bc1dbf0c60e99c5cafeb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 08:54:07 +0100 Subject: [PATCH 174/332] Sliding sync: Fix bg update again (v3) (#17634) Follow-up to https://github.com/element-hq/synapse/pull/17631 and https://github.com/element-hq/synapse/pull/17632 to fix-up https://github.com/element-hq/synapse/pull/17599 --------- Co-authored-by: Eric Eastwood --- changelog.d/17634.misc | 1 + .../databases/main/events_bg_updates.py | 31 ++++++++++++++++--- 2 files changed, 27 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17634.misc diff --git a/changelog.d/17634.misc b/changelog.d/17634.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17634.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 49ca985c4d..2cb3f1d016 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1961,9 +1961,30 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return 0 def _find_previous_membership_txn( - txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int + txn: LoggingTransaction, room_id: str, user_id: str, event_id: str ) -> Tuple[str, str]: # Find the previous invite/knock event before the leave event + # + # Here are some notes on how we landed on this query: + # + # We're using `topological_ordering` instead of `stream_ordering` because + # somehow it's possible to have your `leave` event backfilled with a + # negative `stream_ordering` and your previous `invite` event with a + # positive `stream_ordering` so we wouldn't have a chance of finding the + # previous membership with a naive `event_stream_ordering < ?` comparison. + # + # Also be careful because `room_memberships.event_stream_ordering` is + # nullable and not always filled in. You would need to join on `events` to + # rely on `events.stream_ordering` instead. Even though the + # `events.stream_ordering` also doesn't have a `NOT NULL` constraint, it + # doesn't have any rows where this is the case (checked on `matrix.org`). + # The fact the `events.stream_ordering` is a nullable column is a holdover + # from a rename of the column. + # + # You might also consider using the `event_auth` table to find the previous + # membership, but there are cases where somehow a membership event doesn't + # point back to the previous membership event in the auth events (unknown + # cause). 
txn.execute( """ SELECT event_id, membership @@ -1972,14 +1993,14 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS WHERE room_id = ? AND m.user_id = ? - AND e.stream_ordering < ? - ORDER BY e.stream_ordering DESC + AND e.event_id != ? + ORDER BY e.topological_ordering DESC LIMIT 1 """, ( room_id, user_id, - stream_ordering, + event_id, ), ) row = txn.fetchone() @@ -2106,7 +2127,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS _find_previous_membership_txn, room_id, user_id, - membership_event_stream_ordering, + membership_event_id, ) ) From 89801e04ca209a3675375939faa8422e349ecbd7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 08:54:14 +0100 Subject: [PATCH 175/332] Sliding sync: Ignore tables with no create event in current state (#17633) --- changelog.d/17633.misc | 1 + .../databases/main/events_bg_updates.py | 59 ++++++++++++------- 2 files changed, 38 insertions(+), 22 deletions(-) create mode 100644 changelog.d/17633.misc diff --git a/changelog.d/17633.misc b/changelog.d/17633.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17633.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 2cb3f1d016..cb23f433bc 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -2080,34 +2080,49 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # We're iterating over rooms that we are joined to so they should # have `current_state_events` and we should have some current state # for each room - assert current_state_ids_map + if current_state_ids_map: + fetched_events = await self.get_events( + current_state_ids_map.values() + ) - fetched_events = await self.get_events(current_state_ids_map.values()) + current_state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in current_state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } - current_state_map: StateMap[EventBase] = { - state_key: fetched_events[event_id] - for state_key, event_id in current_state_ids_map.items() - # `get_events(...)` will filter out events for unknown room versions - if event_id in fetched_events - } + # Can happen for unknown room versions (old room versions that aren't known + # anymore) since `get_events(...)` will filter out events for unknown room + # versions + if not current_state_map: + continue - # Can happen for unknown room versions (old room versions that aren't known - # anymore) since `get_events(...)` will filter out events for unknown room - # versions - if not current_state_map: - continue - - state_insert_values = ( - PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map( current_state_map ) - ) - sliding_sync_membership_snapshots_insert_map.update(state_insert_values) - # We should have some insert values for each room, even if they are `None` - assert sliding_sync_membership_snapshots_insert_map + sliding_sync_membership_snapshots_insert_map.update( + state_insert_values + ) + # We should have some 
insert values for each room, even if they are `None` + assert sliding_sync_membership_snapshots_insert_map - # We have current state to work from - sliding_sync_membership_snapshots_insert_map["has_known_state"] = True + # We have current state to work from + sliding_sync_membership_snapshots_insert_map["has_known_state"] = ( + True + ) + else: + # Although we expect every room to have a create event (even + # past unknown room versions since we haven't supported one + # without it), there seem to be some corrupted rooms in + # practice that don't have the create event in the + # `current_state_events` table. The create event does exist + # in the events table though. We'll just say that we don't + # know the state for these rooms and continue on with our + # day. + sliding_sync_membership_snapshots_insert_map["has_known_state"] = ( + False + ) elif membership in (Membership.INVITE, Membership.KNOCK) or ( membership == Membership.LEAVE and is_outlier ): From cdd5979129211a6906ca12c4f6f6ac7a108c2fb7 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 30 Aug 2024 10:07:46 +0200 Subject: [PATCH 176/332] Replace isort and black with ruff (#17620) Ruff now has decent parity with black and isort, so this is going to just save us a bunch of time --- .github/workflows/fix_lint.yaml | 14 +- .github/workflows/tests.yml | 11 +- changelog.d/17620.misc | 1 + docs/code_style.md | 4 +- poetry.lock | 126 ++-------- pylint.cfg | 280 --------------------- pyproject.toml | 38 ++- scripts-dev/lint.sh | 9 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/storage/_base.py | 7 +- synmark/__init__.py | 4 +- 11 files changed, 56 insertions(+), 440 deletions(-) create mode 100644 changelog.d/17620.misc delete mode 100644 pylint.cfg diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index f1e35fcd99..5970b4e826 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -29,17 +29,9 @@ jobs: with: install-project: "false" - - name: Import order (isort) + - name: Run ruff continue-on-error: true - run: poetry run isort . - - - name: Code style (black) - continue-on-error: true - run: poetry run black . - - - name: Semantic checks (ruff) - continue-on-error: true - run: poetry run ruff --fix . + run: poetry run ruff check --fix . - run: cargo clippy --all-features --fix -- -D warnings continue-on-error: true @@ -49,4 +41,4 @@ jobs: - uses: stefanzweifel/git-auto-commit-action@v5 with: - commit_message: "Attempt to fix linting" + commit_message: "Attempt to fix linting" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 730f8552d9..add046ec6a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -131,15 +131,8 @@ jobs: with: install-project: "false" - - name: Import order (isort) - run: poetry run isort --check --diff . - - - name: Code style (black) - run: poetry run black --check --diff . - - - name: Semantic checks (ruff) - # --quiet suppresses the update check. - run: poetry run ruff check --quiet . + - name: Check style + run: poetry run ruff check --output-format=github . lint-mypy: runs-on: ubuntu-latest diff --git a/changelog.d/17620.misc b/changelog.d/17620.misc new file mode 100644 index 0000000000..f583cdcb38 --- /dev/null +++ b/changelog.d/17620.misc @@ -0,0 +1 @@ +Replace `isort` and `black with `ruff`. diff --git a/docs/code_style.md b/docs/code_style.md index 026001b8a3..c28aaadad0 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -8,9 +8,7 @@ errors in code. 
The necessary tools are: -- [black](https://black.readthedocs.io/en/stable/), a source code formatter; -- [isort](https://pycqa.github.io/isort/), which organises each file's imports; -- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and +- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors and enforce a consistent style; and - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker. See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions diff --git a/poetry.lock b/poetry.lock index 0fffa8e9ba..731adff9d4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -105,52 +105,6 @@ files = [ tests = ["pytest (>=3.2.1,!=3.3.0)"] typecheck = ["mypy"] -[[package]] -name = "black" -version = "24.8.0" -description = "The uncompromising code formatter." -optional = false -python-versions = ">=3.8" -files = [ - {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, - {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, - {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, - {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, - {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, - {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, - {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, - {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, - {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, - {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, - {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, - {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, - {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, - {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, - {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, - {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, - {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, - {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, - {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, - {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, - {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, - {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - [[package]] name = "bleach" version = "6.1.0" @@ -832,20 +786,6 @@ tomli = {version = "*", markers = "python_version < \"3.11\""} [package.extras] scripts = ["click (>=6.0)"] -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - [[package]] name = "jaeger-client" version = "4.8.0" @@ -1494,17 +1434,6 @@ files = [ [package.extras] dev = ["jinja2"] -[[package]] -name = "pathspec" -version = "0.11.1" -description = "Utility library for gitignore style pattern matching of file paths." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, -] - [[package]] name = "phonenumbers" version = "8.13.44" @@ -1638,21 +1567,6 @@ files = [ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, ] -[[package]] -name = "platformdirs" -version = "3.1.1" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, - {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - [[package]] name = "prometheus-client" version = "0.20.0" @@ -2354,29 +2268,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.5" +version = "0.6.2" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"}, - {file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"}, - {file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"}, - {file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"}, - {file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"}, - {file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"}, - {file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"}, + {file = "ruff-0.6.2-py3-none-linux_armv6l.whl", hash = 
"sha256:5c8cbc6252deb3ea840ad6a20b0f8583caab0c5ef4f9cca21adc5a92b8f79f3c"}, + {file = "ruff-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:17002fe241e76544448a8e1e6118abecbe8cd10cf68fde635dad480dba594570"}, + {file = "ruff-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3dbeac76ed13456f8158b8f4fe087bf87882e645c8e8b606dd17b0b66c2c1158"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:094600ee88cda325988d3f54e3588c46de5c18dae09d683ace278b11f9d4d534"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:316d418fe258c036ba05fbf7dfc1f7d3d4096db63431546163b472285668132b"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d72b8b3abf8a2d51b7b9944a41307d2f442558ccb3859bbd87e6ae9be1694a5d"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2aed7e243be68487aa8982e91c6e260982d00da3f38955873aecd5a9204b1d66"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d371f7fc9cec83497fe7cf5eaf5b76e22a8efce463de5f775a1826197feb9df8"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f310d63af08f583363dfb844ba8f9417b558199c58a5999215082036d795a1"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db6880c53c56addb8638fe444818183385ec85eeada1d48fc5abe045301b2f1"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1175d39faadd9a50718f478d23bfc1d4da5743f1ab56af81a2b6caf0a2394f23"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939f9c86d51635fe486585389f54582f0d65b8238e08c327c1534844b3bb9a"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0d62ca91219f906caf9b187dea50d17353f15ec9bb15aae4a606cd697b49b4c"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7438a7288f9d67ed3c8ce4d059e67f7ed65e9fe3aa2ab6f5b4b3610e57e3cb56"}, + {file = "ruff-0.6.2-py3-none-win32.whl", hash = "sha256:279d5f7d86696df5f9549b56b9b6a7f6c72961b619022b5b7999b15db392a4da"}, + {file = "ruff-0.6.2-py3-none-win_amd64.whl", hash = "sha256:d9f3469c7dd43cd22eb1c3fc16926fb8258d50cb1b216658a07be95dd117b0f2"}, + {file = "ruff-0.6.2-py3-none-win_arm64.whl", hash = "sha256:f28fcd2cd0e02bdf739297516d5643a945cc7caf09bd9bcb4d932540a5ea4fa9"}, + {file = "ruff-0.6.2.tar.gz", hash = "sha256:239ee6beb9e91feb8e0ec384204a763f36cb53fb895a1a364618c6abb076b3be"}, ] [[package]] @@ -3190,4 +3104,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93" +content-hash = "2bf09e2b68f3abd1a0f9ff2227eb3026ac3d034845acfc120d0b1cb8167ea43b" diff --git a/pylint.cfg b/pylint.cfg deleted file mode 100644 index 2368997112..0000000000 --- a/pylint.cfg +++ /dev/null @@ -1,280 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Profiled execution. -profile=no - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. 
-load-plugins= - - -[MESSAGES CONTROL] - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=missing-docstring - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (RP0004). -comment=no - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -zope=no - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. Python regular -# expressions are accepted. -generated-members=REQUEST,acl_users,aq_parent - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the beginning of the name of dummy variables -# (i.e. not used). 
-dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - - -[BASIC] - -# Required attributes for module, separated by a comma -required-attributes= - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct attribute names in class -# bodies -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=__.*__ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). 
-max-public-methods=20 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[CLASSES] - -# List of interface methods to ignore, separated by a comma. This is used for -# instance to not check methods defines in Zope's Interface base class. -ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/pyproject.toml b/pyproject.toml index b31eca75ec..058c35c829 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,14 +34,9 @@ name = "Internal Changes" showcontent = true -[tool.black] -target-version = ['py38', 'py39', 'py310', 'py311'] -# black ignores everything in .gitignore by default, see -# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore -# Use `extend-exclude` if you want to exclude something in addition to this. - [tool.ruff] line-length = 88 +target-version = "py38" [tool.ruff.lint] # See https://beta.ruff.rs/docs/rules/#error-e @@ -63,6 +58,8 @@ select = [ "W", # pyflakes "F", + # isort + "I001", # flake8-bugbear "B0", # flake8-comprehensions @@ -79,17 +76,20 @@ select = [ "EXE", ] -[tool.isort] -line_length = 88 -sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"] -default_section = "THIRDPARTY" -known_first_party = ["synapse"] -known_tests = ["tests"] -known_twisted = ["twisted", "OpenSSL"] -multi_line_output = 3 -include_trailing_comma = true -combine_as_imports = true -skip_gitignore = true +[tool.ruff.lint.isort] +combine-as-imports = true +section-order = ["future", "standard-library", "third-party", "twisted", "first-party", "testing", "local-folder"] +known-first-party = ["synapse"] + +[tool.ruff.lint.isort.sections] +twisted = ["twisted", "OpenSSL"] +testing = ["tests"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" [tool.maturin] manifest-path = "rust/Cargo.toml" @@ -320,9 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-isort = ">=5.10.1" -black = ">=22.7.0" -ruff = "0.5.5" +ruff = "0.6.2" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 8acf0a6fb8..fa6ff90708 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -1,8 +1,9 @@ #!/usr/bin/env bash # # Runs linting scripts over the local Synapse checkout -# black - opinionated code formatter # ruff - lints and finds mistakes +# mypy - typechecks python code +# cargo clippy - lints rust code set -e @@ -101,12 +102,6 @@ echo # Print out the commands being run set -x -# Ensure the sort order of imports. -isort "${files[@]}" - -# Ensure Python code conforms to an opinionated style. -python3 -m black "${files[@]}" - # Ensure the sample configuration file conforms to style checks. ./scripts-dev/config-lint.sh diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index a411ed614e..1975ebb477 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -28,7 +28,7 @@ from synapse._pydantic_compat import HAS_PYDANTIC_V2 if TYPE_CHECKING or HAS_PYDANTIC_V2: from pydantic.v1 import Extra, StrictInt, StrictStr else: - from pydantic import StrictInt, StrictStr, Extra + from pydantic import Extra, StrictInt, StrictStr from signedjson.sign import sign_json diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index e12ab94576..1ac85ad66d 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -23,8 +23,11 @@ import logging from abc import ABCMeta from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union -from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401 -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + make_in_list_sql_clause, # noqa: F401 +) from synapse.types import get_domain_from_id from synapse.util import json_decoder from synapse.util.caches.descriptors import CachedFunction diff --git a/synmark/__init__.py b/synmark/__init__.py index 8c47e50c7c..887fec2f96 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -27,7 +27,9 @@ from synapse.types import ISynapseReactor try: from twisted.internet.epollreactor import EPollReactor as Reactor except ImportError: - from twisted.internet.pollreactor import PollReactor as Reactor # type: ignore[assignment] + from twisted.internet.pollreactor import ( # type: ignore[assignment] + PollReactor as Reactor, + ) from twisted.internet.main import installReactor From 02ebcf7725fe89ab13a009e49fe473446e7b76c0 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Fri, 30 Aug 2024 13:52:57 +0100 Subject: [PATCH 177/332] Use custom stage UIA error for MAS cross-signing reset (#17509) Rather than 501 M_UNRECOGNISED Client side implementation at https://github.com/matrix-org/matrix-react-sdk/pull/12892/ --- changelog.d/17509.feature | 1 + synapse/rest/client/auth.py | 13 ++++++++++- synapse/rest/client/keys.py | 30 +++++++++++++++++++------ tests/handlers/test_oauth_delegation.py | 2 +- tests/rest/client/test_keys.py | 12 +++------- 5 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17509.feature diff --git a/changelog.d/17509.feature b/changelog.d/17509.feature new file mode 100644 index 0000000000..6d639ceb98 --- /dev/null +++ b/changelog.d/17509.feature @@ -0,0 +1 @@ 
+Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 4221f35937..32eeecd662 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -27,7 +27,7 @@ from twisted.web.server import Request from synapse.api.constants import LoginType from synapse.api.errors import LoginError, SynapseError from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.server import HttpServer, respond_with_html +from synapse.http.server import HttpServer, respond_with_html, respond_with_redirect from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -66,6 +66,17 @@ class AuthRestServlet(RestServlet): if not session: raise SynapseError(400, "No session supplied") + if ( + self.hs.config.experimental.msc3861.enabled + and stagetype == "org.matrix.cross_signing_reset" + ): + config = self.hs.config.experimental.msc3861 + if config.account_management_url is not None: + url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + else: + url = config.issuer + respond_with_redirect(request, str.encode(url)) + if stagetype == LoginType.RECAPTCHA: html = self.recaptcha_template.render( session=session, diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index eddad7d5b8..a33eb6c1f2 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,10 +23,13 @@ import logging import re from collections import Counter -from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple -from synapse.api.errors import Codes, InvalidAPICallError, SynapseError +from synapse.api.errors import ( + InteractiveAuthIncompleteError, + InvalidAPICallError, + SynapseError, +) from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -409,11 +412,24 @@ class SigningKeyUploadServlet(RestServlet): else: url = config.issuer - raise SynapseError( - HTTPStatus.NOT_IMPLEMENTED, - "To reset your end-to-end encryption cross-signing identity, " - f"you first need to approve it at {url} and then try again.", - Codes.UNRECOGNIZED, + # We use a dummy session ID as this isn't really a UIA flow, but we + # reuse the same API shape for better client compatibility. + raise InteractiveAuthIncompleteError( + "dummy", + { + "session": "dummy", + "flows": [ + {"stages": ["org.matrix.cross_signing_reset"]}, + ], + "params": { + "org.matrix.cross_signing_reset": { + "url": url, + }, + }, + "msg": "To reset your end-to-end encryption cross-signing " + f"identity, you first need to approve it at {url} and " + "then try again.", + }, ) else: # Without MSC3861, we require UIA. 
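For reference, a client hitting this path now receives a UIA-shaped 401 challenge rather than a 501. A minimal sketch of the JSON body, expressed as a Python literal; the field values mirror the `InteractiveAuthIncompleteError` built above, and the account-management URL is a made-up example:

    # Sketch only: the URL is hypothetical; a real server fills in the
    # account-management URL (or issuer) from its own configuration.
    cross_signing_reset_challenge = {
        "session": "dummy",  # not a real UIA session, kept for client compatibility
        "flows": [{"stages": ["org.matrix.cross_signing_reset"]}],
        "params": {
            "org.matrix.cross_signing_reset": {
                "url": "https://account.example.com?action=org.matrix.cross_signing_reset",
            },
        },
        "msg": "To reset your end-to-end encryption cross-signing identity, "
        "you first need to approve it at "
        "https://account.example.com?action=org.matrix.cross_signing_reset "
        "and then try again.",
    }

The tests that follow assert the matching status-code change from NOT_IMPLEMENTED to UNAUTHORIZED.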
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 036c539db2..5b5dc713d1 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -550,7 +550,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
             access_token="mockAccessToken",
         )

-        self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body)
+        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)

     def expect_unauthorized(
         self, method: str, path: str, content: Union[bytes, str, JsonDict] = ""
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 8bbd109092..d9a210b616 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -315,9 +315,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                 "master_key": master_key2,
             },
         )
-        self.assertEqual(
-            channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-        )
+        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)

         # Pretend that MAS did UIA and allowed us to replace the master key.
         channel = self.make_request(
@@ -349,9 +347,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                 "master_key": master_key3,
             },
         )
-        self.assertEqual(
-            channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-        )
+        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)

         # Pretend that MAS did UIA and allowed us to replace the master key.
         channel = self.make_request(
@@ -376,6 +372,4 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                 "master_key": master_key3,
             },
         )
-        self.assertEqual(
-            channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-        )
+        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)

From ca69d0f57165ecb10204ee433992b20af71cbe91 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Fri, 30 Aug 2024 16:04:08 +0200
Subject: [PATCH 178/332] MSC3861: load the issuer and account management URLs from OIDC discovery (#17407)

This will help mitigate any discrepancies between the issuer configured and the one returned by the OIDC provider. This also removes the need for configuring the `account_management_url` explicitly, as it will now be loaded from the OIDC discovery, as per MSC2965. Because we may now fetch stuff for the .well-known/matrix/client endpoint, this also transforms the client well-known resource to be asynchronous.
---
 changelog.d/17407.misc | 1 +
 synapse/api/auth/msc3861_delegated.py | 33 ++++++++++++++++++++++--
 synapse/rest/client/auth.py | 16 ++++++++----
 synapse/rest/client/auth_issuer.py | 10 ++++++--
 synapse/rest/client/keys.py | 16 ++++++++----
 synapse/rest/client/login.py | 2 +-
 synapse/rest/well_known.py | 37 +++++++++++++++------------
 tests/rest/client/test_auth_issuer.py | 20 ++++++++++++++-
 tests/rest/test_well_known.py | 37 +++++++++++++++++----------
 9 files changed, 126 insertions(+), 46 deletions(-)
 create mode 100644 changelog.d/17407.misc

diff --git a/changelog.d/17407.misc b/changelog.d/17407.misc
new file mode 100644
index 0000000000..9ed6e61a5b
--- /dev/null
+++ b/changelog.d/17407.misc
@@ -0,0 +1 @@
+MSC3861: load the issuer and account management URLs from OIDC discovery.
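In rough terms, the discovery step the following diffs add boils down to fetching the issuer's metadata document and preferring its values over the static config. A minimal sketch, assuming a reachable issuer and only the two fields exercised by this patch's tests (`issuer` and `account_management_uri`); the issuer URL here is hypothetical:

    import json
    from urllib.request import urlopen

    configured_issuer = "https://account.example.com"  # from the MSC3861 config

    # MSC2965/OIDC discovery document; Synapse fetches this once and caches it.
    with urlopen(f"{configured_issuer}/.well-known/openid-configuration") as resp:
        metadata = json.load(resp)

    # Prefer the discovered values, falling back to the static configuration.
    issuer = metadata.get("issuer", configured_issuer)
    account_management_url = metadata.get("account_management_uri")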
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 7361666c77..6bd845c7e3 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -121,7 +121,9 @@ class MSC3861DelegatedAuth(BaseAuth): self._hostname = hs.hostname self._admin_token = self._config.admin_token - self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata]( + self._load_metadata + ) if isinstance(auth_method, PrivateKeyJWTWithKid): # Use the JWK as the client secret when using the private_key_jwt method @@ -145,6 +147,33 @@ class MSC3861DelegatedAuth(BaseAuth): # metadata.validate_introspection_endpoint() return metadata + async def issuer(self) -> str: + """ + Get the configured issuer + + This will use the issuer value set in the metadata, + falling back to the one set in the config if not set in the metadata + """ + metadata = await self._issuer_metadata.get() + return metadata.issuer or self._config.issuer + + async def account_management_url(self) -> Optional[str]: + """ + Get the configured account management URL + + This will discover the account management URL from the issuer if it's not set in the config + """ + if self._config.account_management_url is not None: + return self._config.account_management_url + + try: + metadata = await self._issuer_metadata.get() + return metadata.get("account_management_uri", None) + # We don't want to raise here if we can't load the metadata + except Exception: + logger.warning("Failed to load metadata:", exc_info=True) + return None + async def _introspection_endpoint(self) -> str: """ Returns the introspection endpoint of the issuer @@ -154,7 +183,7 @@ class MSC3861DelegatedAuth(BaseAuth): if self._config.introspection_endpoint is not None: return self._config.introspection_endpoint - metadata = await self._load_metadata() + metadata = await self._issuer_metadata.get() return metadata.get("introspection_endpoint") async def _introspect_token(self, token: str) -> IntrospectionToken: diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 32eeecd662..b8dca7c797 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, cast from twisted.web.server import Request @@ -70,11 +70,17 @@ class AuthRestServlet(RestServlet): self.hs.config.experimental.msc3861.enabled and stagetype == "org.matrix.cross_signing_reset" ): - config = self.hs.config.experimental.msc3861 - if config.account_management_url is not None: - url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + url = await auth.account_management_url() + if url is not None: + url = f"{url}?action=org.matrix.cross_signing_reset" else: - url = config.issuer + url = await auth.issuer() respond_with_redirect(request, str.encode(url)) if stagetype == LoginType.RECAPTCHA: diff --git a/synapse/rest/client/auth_issuer.py b/synapse/rest/client/auth_issuer.py index 77b9720956..acd0399d85 100644 --- a/synapse/rest/client/auth_issuer.py +++ b/synapse/rest/client/auth_issuer.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging import typing -from typing import Tuple +from typing import Tuple, cast from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -43,10 +43,16 @@ class AuthIssuerServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self._config = hs.config + self._auth = hs.get_auth() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self._config.experimental.msc3861.enabled: - return 200, {"issuer": self._config.experimental.msc3861.issuer} + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + return 200, {"issuer": await auth.issuer()} else: # Wouldn't expect this to be reached: the servelet shouldn't have been # registered. Still, fail gracefully if we are registered for some reason. diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index a33eb6c1f2..7025662fdc 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,7 +23,7 @@ import logging import re from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast from synapse.api.errors import ( InteractiveAuthIncompleteError, @@ -406,11 +406,17 @@ class SigningKeyUploadServlet(RestServlet): # explicitly mark the master key as replaceable. if self.hs.config.experimental.msc3861.enabled: if not master_key_updatable_without_uia: - config = self.hs.config.experimental.msc3861 - if config.account_management_url is not None: - url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + uri = await auth.account_management_url() + if uri is not None: + url = f"{uri}?action=org.matrix.cross_signing_reset" else: - url = config.issuer + url = await auth.issuer() # We use a dummy session ID as this isn't really a UIA flow, but we # reuse the same API shape for better client compatibility. 
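Downstream of this, the client well-known document (rebuilt in the `synapse/rest/well_known.py` diff below) surfaces the same discovered values. A sketch of the resulting `/.well-known/matrix/client` body as a Python literal, with values taken from this patch's tests rather than any real deployment:

    # Values match the expectations in tests/rest/test_well_known.py below.
    client_well_known = {
        "m.homeserver": {"base_url": "https://homeserver/"},
        "org.matrix.msc2965.authentication": {
            "issuer": "https://issuer",
            "account": "https://my-account.issuer",
        },
    }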
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index ae691bcdba..03b1e7edc4 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -268,7 +268,7 @@ class LoginRestServlet(RestServlet): approval_notice_medium=ApprovalNoticeMedium.NONE, ) - well_known_data = self._well_known_builder.get_well_known() + well_known_data = await self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data return 200, result diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index d0ca8ca46b..989e570671 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -18,12 +18,13 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, Tuple, cast from twisted.web.resource import Resource from twisted.web.server import Request -from synapse.http.server import set_cors_headers +from synapse.api.errors import NotFoundError +from synapse.http.server import DirectServeJsonResource from synapse.http.site import SynapseRequest from synapse.types import JsonDict from synapse.util import json_encoder @@ -38,8 +39,9 @@ logger = logging.getLogger(__name__) class WellKnownBuilder: def __init__(self, hs: "HomeServer"): self._config = hs.config + self._auth = hs.get_auth() - def get_well_known(self) -> Optional[JsonDict]: + async def get_well_known(self) -> Optional[JsonDict]: if not self._config.server.serve_client_wellknown: return None @@ -52,13 +54,20 @@ class WellKnownBuilder: # We use the MSC3861 values as they are used by multiple MSCs if self._config.experimental.msc3861.enabled: + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + result["org.matrix.msc2965.authentication"] = { - "issuer": self._config.experimental.msc3861.issuer + "issuer": await auth.issuer(), } - if self._config.experimental.msc3861.account_management_url is not None: + account_management_url = await auth.account_management_url() + if account_management_url is not None: result["org.matrix.msc2965.authentication"][ "account" - ] = self._config.experimental.msc3861.account_management_url + ] = account_management_url if self._config.server.extra_well_known_client_content: for ( @@ -71,26 +80,22 @@ class WellKnownBuilder: return result -class ClientWellKnownResource(Resource): +class ClientWellKnownResource(DirectServeJsonResource): """A Twisted web resource which renders the .well-known/matrix/client file""" isLeaf = 1 def __init__(self, hs: "HomeServer"): - Resource.__init__(self) + super().__init__() self._well_known_builder = WellKnownBuilder(hs) - def render_GET(self, request: SynapseRequest) -> bytes: - set_cors_headers(request) - r = self._well_known_builder.get_well_known() + async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + r = await self._well_known_builder.get_well_known() if not r: - request.setResponseCode(404) - request.setHeader(b"Content-Type", b"text/plain") - return b".well-known not available" + raise NotFoundError(".well-known not available") logger.debug("returning: %s", r) - request.setHeader(b"Content-Type", b"application/json") - return json_encoder.encode(r).encode("utf-8") + return 200, r class ServerWellKnownResource(Resource): diff --git a/tests/rest/client/test_auth_issuer.py 
b/tests/rest/client/test_auth_issuer.py index 964baeec32..299475a35c 100644 --- a/tests/rest/client/test_auth_issuer.py +++ b/tests/rest/client/test_auth_issuer.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from http import HTTPStatus +from unittest.mock import AsyncMock from synapse.rest.client import auth_issuer @@ -50,10 +51,27 @@ class AuthIssuerTestCase(HomeserverTestCase): } ) def test_returns_issuer_when_oidc_enabled(self) -> None: - # Make an unauthenticated request for the discovery info. + # Patch the HTTP client to return the issuer metadata + req_mock = AsyncMock(return_value={"issuer": ISSUER}) + self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] + channel = self.make_request( "GET", "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer", ) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(channel.json_body, {"issuer": ISSUER}) + + req_mock.assert_called_with("https://account.example.com/.well-known/openid-configuration") + req_mock.reset_mock() + + # Second call it should use the cached value + channel = self.make_request( + "GET", + "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer", + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.json_body, {"issuer": ISSUER}) + req_mock.assert_not_called() diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index e166c13bc1..ac992766e8 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -17,6 +17,8 @@ # [This file includes modifications made by New Vector Limited] # # +from unittest.mock import AsyncMock + from twisted.web.resource import Resource from synapse.rest.well_known import well_known_resource @@ -112,7 +114,6 @@ class WellKnownTests(unittest.HomeserverTestCase): "msc3861": { "enabled": True, "issuer": "https://issuer", - "account_management_url": "https://my-account.issuer", "client_id": "id", "client_auth_method": "client_secret_post", "client_secret": "secret", @@ -122,18 +123,26 @@ class WellKnownTests(unittest.HomeserverTestCase): } ) def test_client_well_known_msc3861_oauth_delegation(self) -> None: - channel = self.make_request( - "GET", "/.well-known/matrix/client", shorthand=False - ) + # Patch the HTTP client to return the issuer metadata + req_mock = AsyncMock(return_value={"issuer": "https://issuer", "account_management_uri": "https://my-account.issuer"}) + self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] - self.assertEqual(channel.code, 200) - self.assertEqual( - channel.json_body, - { - "m.homeserver": {"base_url": "https://homeserver/"}, - "org.matrix.msc2965.authentication": { - "issuer": "https://issuer", - "account": "https://my-account.issuer", + for _ in range(2): + channel = self.make_request( + "GET", "/.well-known/matrix/client", shorthand=False + ) + + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, + { + "m.homeserver": {"base_url": "https://homeserver/"}, + "org.matrix.msc2965.authentication": { + "issuer": "https://issuer", + "account": "https://my-account.issuer", + }, }, - }, - ) + ) + + # It should have been called exactly once, because it gets cached + req_mock.assert_called_once_with("https://issuer/.well-known/openid-configuration") From dab88a7b1feaf1a07b227898911808fc269609fc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 16:22:57 +0100 Subject: [PATCH 179/332] Sliding Sync: Make 
 `PerConnectionState` immutable (#17600)

This is so that we can cache it. We also move the sliding sync types to
`synapse/types/handlers/sliding_sync.py`. This is mainly in-prep for
#17599. The only change in behaviour is that
`RoomSyncConfig.combine_sync_config(..)` now returns a new room sync
config rather than mutating in-place.

Reviewable commit-by-commit.

---------

Co-authored-by: Eric Eastwood
---
 changelog.d/17600.misc                      |   1 +
 scripts-dev/mypy_synapse_plugin.py          |  19 +-
 synapse/handlers/sliding_sync/__init__.py   |  17 +-
 synapse/handlers/sliding_sync/extensions.py |  14 +-
 synapse/handlers/sliding_sync/room_lists.py |  34 +-
 synapse/handlers/sliding_sync/store.py      |  10 +-
 synapse/types/handlers/__init__.py          | 358 +--------------
 .../handlers/sliding_sync.py}               | 413 ++++++++++++++++--
 tests/handlers/test_sliding_sync.py         |  23 +-
 9 files changed, 444 insertions(+), 445 deletions(-)
 create mode 100644 changelog.d/17600.misc
 rename synapse/{handlers/sliding_sync/types.py => types/handlers/sliding_sync.py} (52%)

diff --git a/changelog.d/17600.misc b/changelog.d/17600.misc
new file mode 100644
index 0000000000..a81c67f6d1
--- /dev/null
+++ b/changelog.d/17600.misc
@@ -0,0 +1 @@
+Make the sliding sync `PerConnectionState` class immutable.
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 877b831751..509047b41b 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -38,6 +38,7 @@ from mypy.types import (
     NoneType,
     TupleType,
     TypeAliasType,
+    TypeVarType,
     UninhabitedType,
     UnionType,
 )
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
     "synapse.synapse_rust.push.FilteredPushRules",
     # This is technically not immutable, but close enough.
     "signedjson.types.VerifyKey",
+    "synapse.types.StrCollection",
 }

 # Immutable containers only if the values are also immutable.
@@ -298,7 +300,7 @@ def is_cacheable(

     elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
         # Mutable containers are mutable regardless of their underlying type.
-        return False, None
+        return False, f"container {rt.type.fullname} is mutable"

     elif "attrs" in rt.type.metadata:
         # attrs classes are only cachable iff it is frozen (immutable itself)
@@ -318,6 +320,9 @@ def is_cacheable(
         else:
             return False, "non-frozen attrs class"

+    elif rt.type.is_enum:
+        # We assume Enum values are immutable
+        return True, None
     else:
         # Ensure we fail for unknown types, these generally means that the
         # above code is not complete.
@@ -326,6 +331,18 @@ def is_cacheable(
             f"Don't know how to handle {rt.type.fullname} return type instance",
         )

+    elif isinstance(rt, TypeVarType):
+        # We consider TypeVars immutable if they are bound to a set of immutable
+        # types.
+        if rt.values:
+            for value in rt.values:
+                ok, note = is_cacheable(value, signature, verbose)
+                if not ok:
+                    return False, f"TypeVar bound not cacheable {value}"
+            return True, None
+
+        return False, "TypeVar is unbound"
+
     elif isinstance(rt, NoneType):
         # None is cachable.
return True, None diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index ccd464cd1c..d62d520abd 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -29,13 +29,6 @@ from synapse.handlers.sliding_sync.room_lists import ( _RoomMembershipForUser, ) from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - MutablePerConnectionState, - PerConnectionState, - RoomSyncConfig, - StateValues, -) from synapse.logging.opentracing import ( SynapseTags, log_kv, @@ -57,7 +50,15 @@ from synapse.types import ( StreamKeyType, StreamToken, ) -from synapse.types.handlers import SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomSyncConfig, + SlidingSyncConfig, + SlidingSyncResult, + StateValues, +) from synapse.types.state import StateFilter from synapse.util.async_helpers import concurrently_execute from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index a2d4f24f9c..d9f4c56e6e 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -20,11 +20,6 @@ from typing_extensions import assert_never from synapse.api.constants import AccountDataTypes, EduTypes from synapse.handlers.receipts import ReceiptEventSource -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - MutablePerConnectionState, - PerConnectionState, -) from synapse.logging.opentracing import trace from synapse.storage.databases.main.receipts import ReceiptInRoom from synapse.types import ( @@ -35,7 +30,14 @@ from synapse.types import ( StrCollection, StreamToken, ) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + OperationType, + PerConnectionState, + SlidingSyncConfig, + SlidingSyncResult, +) if TYPE_CHECKING: from synapse.server import HomeServer diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 4718e8092b..0e6cb28524 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -40,11 +40,6 @@ from synapse.api.constants import ( ) from synapse.events import StrippedStateEvent from synapse.events.utils import parse_stripped_state_event -from synapse.handlers.sliding_sync.types import ( - HaveSentRoomFlag, - PerConnectionState, - RoomSyncConfig, -) from synapse.logging.opentracing import start_active_span, trace from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, @@ -61,7 +56,14 @@ from synapse.types import ( StreamToken, UserID, ) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + OperationType, + PerConnectionState, + RoomSyncConfig, + SlidingSyncConfig, + SlidingSyncResult, +) from synapse.types.state import StateFilter if TYPE_CHECKING: @@ -279,15 +281,11 @@ class SlidingSyncRoomLists: room_id ) if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( + room_sync_config = existing_room_sync_config.combine_room_sync_config( room_sync_config ) - else: - # 
Make a copy so if we modify it later, it doesn't - # affect all references. - relevant_room_map[room_id] = ( - room_sync_config.deep_copy() - ) + + relevant_room_map[room_id] = room_sync_config room_ids_in_list.append(room_id) @@ -351,11 +349,13 @@ class SlidingSyncRoomLists: # and need to fetch more info about. existing_room_sync_config = relevant_room_map.get(room_id) if existing_room_sync_config is not None: - existing_room_sync_config.combine_room_sync_config( - room_sync_config + room_sync_config = ( + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) ) - else: - relevant_room_map[room_id] = room_sync_config + + relevant_room_map[room_id] = room_sync_config # Filtered subset of `relevant_room_map` for rooms that may have updates # (in the event stream) diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index 3b727432fb..e38fe3556f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -18,13 +18,13 @@ from typing import TYPE_CHECKING, Dict, Optional, Tuple import attr from synapse.api.errors import SlidingSyncUnknownPosition -from synapse.handlers.sliding_sync.types import ( - MutablePerConnectionState, - PerConnectionState, -) from synapse.logging.opentracing import trace from synapse.types import SlidingSyncStreamToken -from synapse.types.handlers import SlidingSyncConfig +from synapse.types.handlers.sliding_sync import ( + MutablePerConnectionState, + PerConnectionState, + SlidingSyncConfig, +) if TYPE_CHECKING: pass diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index b303bb1f96..463de1a814 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -17,33 +17,9 @@ # [This file includes modifications made by New Vector Limited] # # -from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple -import attr -from typing_extensions import TypedDict -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra -else: - from pydantic import Extra - -from synapse.events import EventBase -from synapse.types import ( - DeviceListUpdates, - JsonDict, - JsonMapping, - Requester, - SlidingSyncStreamToken, - StreamToken, - UserID, -) -from synapse.types.rest.client import SlidingSyncBody - -if TYPE_CHECKING: - from synapse.handlers.relations import BundledAggregations +from typing import List, Optional, TypedDict class ShutdownRoomParams(TypedDict): @@ -101,335 +77,3 @@ class ShutdownRoomResponse(TypedDict): failed_to_kick_users: List[str] local_aliases: List[str] new_room_id: Optional[str] - - -class SlidingSyncConfig(SlidingSyncBody): - """ - Inherit from `SlidingSyncBody` since we need all of the same fields and add a few - extra fields that we need in the handler - """ - - user: UserID - requester: Requester - - # Pydantic config - class Config: - # By default, ignore fields that we don't recognise. - extra = Extra.ignore - # By default, don't allow fields to be reassigned after parsing. - allow_mutation = False - # Allow custom types like `UserID` to be used in the model - arbitrary_types_allowed = True - - -class OperationType(Enum): - """ - Represents the operation types in a Sliding Sync window. - - Attributes: - SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about - entries in this range. - INSERT: Sets a single entry. 
If the position is not empty then clients MUST move - entries to the left or the right depending on where the closest empty space is. - DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move - places. - INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for - offline support, but they should be treated as empty when additional operations - which concern indexes in the range arrive from the server. - """ - - SYNC: Final = "SYNC" - INSERT: Final = "INSERT" - DELETE: Final = "DELETE" - INVALIDATE: Final = "INVALIDATE" - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class SlidingSyncResult: - """ - The Sliding Sync result to be serialized to JSON for a response. - - Attributes: - next_pos: The next position token in the sliding window to request (next_batch). - lists: Sliding window API. A map of list key to list results. - rooms: Room subscription API. A map of room ID to room results. - extensions: Extensions API. A map of extension key to extension results. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class RoomResult: - """ - Attributes: - name: Room name or calculated room name. - avatar: Room avatar - heroes: List of stripped membership events (containing `user_id` and optionally - `avatar_url` and `displayname`) for the users used to calculate the room name. - is_dm: Flag to specify whether the room is a direct-message room (most likely - between two people). - initial: Flag which is set when this is the first time the server is sending this - data on this connection. Clients can use this flag to replace or update - their local state. When there is an update, servers MUST omit this flag - entirely and NOT send "initial":false as this is wasteful on bandwidth. The - absence of this flag means 'false'. - unstable_expanded_timeline: Flag which is set if we're returning more historic - events due to the timeline limit having increased. See "XXX: Odd behavior" - comment ing `synapse.handlers.sliding_sync`. - required_state: The current state of the room - timeline: Latest events in the room. The last event is the most recent. - bundled_aggregations: A mapping of event ID to the bundled aggregations for - the timeline events above. This allows clients to show accurate reaction - counts (or edits, threads), even if some of the reaction events were skipped - over in a gappy sync. - stripped_state: Stripped state events (for rooms where the usre is - invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2, - absent on joined/left rooms - prev_batch: A token that can be passed as a start parameter to the - `/rooms//messages` API to retrieve earlier messages. - limited: True if there are more events than `timeline_limit` looking - backwards from the `response.pos` to the `request.pos`. - num_live: The number of timeline events which have just occurred and are not historical. - The last N events are 'live' and should be treated as such. This is mostly - useful to determine whether a given @mention event should make a noise or not. - Clients cannot rely solely on the absence of `initial: true` to determine live - events because if a room not in the sliding window bumps into the window because - of an @mention it will have `initial: true` yet contain a single live event - (with potentially other old events in the timeline). - bump_stamp: The `stream_ordering` of the last event according to the - `bump_event_types`. 
This helps clients sort more readily without them - needing to pull in a bunch of the timeline to determine the last activity. - `bump_event_types` is a thing because for example, we don't want display - name changes to mark the room as unread and bump it to the top. For - encrypted rooms, we just have to consider any activity as a bump because we - can't see the content and the client has to figure it out for themselves. - joined_count: The number of users with membership of join, including the client's - own user ID. (same as sync `v2 m.joined_member_count`) - invited_count: The number of users with membership of invite. (same as sync v2 - `m.invited_member_count`) - notification_count: The total number of unread notifications for this room. (same - as sync v2) - highlight_count: The number of unread notifications for this room with the highlight - flag set. (same as sync v2) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class StrippedHero: - user_id: str - display_name: Optional[str] - avatar_url: Optional[str] - - name: Optional[str] - avatar: Optional[str] - heroes: Optional[List[StrippedHero]] - is_dm: bool - initial: bool - unstable_expanded_timeline: bool - # Should be empty for invite/knock rooms with `stripped_state` - required_state: List[EventBase] - # Should be empty for invite/knock rooms with `stripped_state` - timeline_events: List[EventBase] - bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] - # Optional because it's only relevant to invite/knock rooms - stripped_state: List[JsonDict] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - prev_batch: Optional[StreamToken] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - limited: Optional[bool] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - num_live: Optional[int] - bump_stamp: int - joined_count: int - invited_count: int - notification_count: int - highlight_count: int - - def __bool__(self) -> bool: - return ( - # If this is the first time the client is seeing the room, we should not filter it out - # under any circumstance. - self.initial - # We need to let the client know if there are any new events - or bool(self.required_state) - or bool(self.timeline_events) - or bool(self.stripped_state) - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class SlidingWindowList: - """ - Attributes: - count: The total number of entries in the list. Always present if this list - is. - ops: The sliding list operations to perform. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Operation: - """ - Attributes: - op: The operation type to perform. - range: Which index positions are affected by this operation. These are - both inclusive. - room_ids: Which room IDs are affected by this operation. These IDs match - up to the positions in the `range`, so the last room ID in this list - matches the 9th index. The room data is held in a separate object. 
- """ - - op: OperationType - range: Tuple[int, int] - room_ids: List[str] - - count: int - ops: List[Operation] - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Extensions: - """Responses for extensions - - Attributes: - to_device: The to-device extension (MSC3885) - e2ee: The E2EE device extension (MSC3884) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ToDeviceExtension: - """The to-device extension (MSC3885) - - Attributes: - next_batch: The to-device stream token the client should use - to get more results - events: A list of to-device messages for the client - """ - - next_batch: str - events: Sequence[JsonMapping] - - def __bool__(self) -> bool: - return bool(self.events) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class E2eeExtension: - """The E2EE device extension (MSC3884) - - Attributes: - device_list_updates: List of user_ids whose devices have changed or left (only - present on incremental syncs). - device_one_time_keys_count: Map from key algorithm to the number of - unclaimed one-time keys currently held on the server for this device. If - an algorithm is unlisted, the count for that algorithm is assumed to be - zero. If this entire parameter is missing, the count for all algorithms - is assumed to be zero. - device_unused_fallback_key_types: List of unused fallback key algorithms - for this device. - """ - - # Only present on incremental syncs - device_list_updates: Optional[DeviceListUpdates] - device_one_time_keys_count: Mapping[str, int] - device_unused_fallback_key_types: Sequence[str] - - def __bool__(self) -> bool: - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. - # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - default_otk = self.device_one_time_keys_count.get("signed_curve25519") - more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( - default_otk is not None and default_otk > 0 - ) - - return bool( - more_than_default_otk - or self.device_list_updates - or self.device_unused_fallback_key_types - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class AccountDataExtension: - """The Account Data extension (MSC3959) - - Attributes: - global_account_data_map: Mapping from `type` to `content` of global account - data events. - account_data_by_room_map: Mapping from room_id to mapping of `type` to - `content` of room account data events. 
- """ - - global_account_data_map: Mapping[str, JsonMapping] - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] - - def __bool__(self) -> bool: - return bool( - self.global_account_data_map or self.account_data_by_room_map - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ReceiptsExtension: - """The Receipts extension (MSC3960) - - Attributes: - room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral - event (type, content) - """ - - room_id_to_receipt_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_receipt_map) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class TypingExtension: - """The Typing Notification extension (MSC3961) - - Attributes: - room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral - event (type, content) - """ - - room_id_to_typing_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_typing_map) - - to_device: Optional[ToDeviceExtension] = None - e2ee: Optional[E2eeExtension] = None - account_data: Optional[AccountDataExtension] = None - receipts: Optional[ReceiptsExtension] = None - typing: Optional[TypingExtension] = None - - def __bool__(self) -> bool: - return bool( - self.to_device - or self.e2ee - or self.account_data - or self.receipts - or self.typing - ) - - next_pos: SlidingSyncStreamToken - lists: Mapping[str, SlidingWindowList] - rooms: Dict[str, RoomResult] - extensions: Extensions - - def __bool__(self) -> bool: - """Make the result appear empty if there are no updates. This is used - to tell if the notifier needs to wait for more events when polling for - events. - """ - # We don't include `self.lists` here, as a) `lists` is always non-empty even if - # there are no changes, and b) since we're sorting rooms by `stream_ordering` of - # the latest activity, anything that would cause the order to change would end - # up in `self.rooms` and cause us to send down the change. 
- return bool(self.rooms or self.extensions) - - @staticmethod - def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": - "Return a new empty result" - return SlidingSyncResult( - next_pos=next_pos, - lists={}, - rooms={}, - extensions=SlidingSyncResult.Extensions(), - ) diff --git a/synapse/handlers/sliding_sync/types.py b/synapse/types/handlers/sliding_sync.py similarity index 52% rename from synapse/handlers/sliding_sync/types.py rename to synapse/types/handlers/sliding_sync.py index 003419d40a..bca1ff7b54 100644 --- a/synapse/handlers/sliding_sync/types.py +++ b/synapse/types/handlers/sliding_sync.py @@ -18,30 +18,382 @@ from collections import ChainMap from enum import Enum from typing import ( TYPE_CHECKING, + AbstractSet, Callable, Dict, Final, Generic, + List, Mapping, MutableMapping, Optional, + Sequence, Set, + Tuple, TypeVar, cast, ) import attr +from synapse._pydantic_compat import HAS_PYDANTIC_V2 from synapse.api.constants import EventTypes from synapse.types import MultiWriterStreamToken, RoomStreamToken, StrCollection, UserID -from synapse.types.handlers import SlidingSyncConfig + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import Extra +else: + from pydantic import Extra + +from synapse.events import EventBase +from synapse.types import ( + DeviceListUpdates, + JsonDict, + JsonMapping, + Requester, + SlidingSyncStreamToken, + StreamToken, +) +from synapse.types.rest.client import SlidingSyncBody if TYPE_CHECKING: - pass + from synapse.handlers.relations import BundledAggregations logger = logging.getLogger(__name__) +class SlidingSyncConfig(SlidingSyncBody): + """ + Inherit from `SlidingSyncBody` since we need all of the same fields and add a few + extra fields that we need in the handler + """ + + user: UserID + requester: Requester + + # Pydantic config + class Config: + # By default, ignore fields that we don't recognise. + extra = Extra.ignore + # By default, don't allow fields to be reassigned after parsing. + allow_mutation = False + # Allow custom types like `UserID` to be used in the model + arbitrary_types_allowed = True + + +class OperationType(Enum): + """ + Represents the operation types in a Sliding Sync window. + + Attributes: + SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about + entries in this range. + INSERT: Sets a single entry. If the position is not empty then clients MUST move + entries to the left or the right depending on where the closest empty space is. + DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move + places. + INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for + offline support, but they should be treated as empty when additional operations + which concern indexes in the range arrive from the server. + """ + + SYNC: Final = "SYNC" + INSERT: Final = "INSERT" + DELETE: Final = "DELETE" + INVALIDATE: Final = "INVALIDATE" + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SlidingSyncResult: + """ + The Sliding Sync result to be serialized to JSON for a response. + + Attributes: + next_pos: The next position token in the sliding window to request (next_batch). + lists: Sliding window API. A map of list key to list results. + rooms: Room subscription API. A map of room ID to room results. + extensions: Extensions API. A map of extension key to extension results. + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class RoomResult: + """ + Attributes: + name: Room name or calculated room name. 
+            avatar: Room avatar
+            heroes: List of stripped membership events (containing `user_id` and optionally
+                `avatar_url` and `displayname`) for the users used to calculate the room name.
+            is_dm: Flag to specify whether the room is a direct-message room (most likely
+                between two people).
+            initial: Flag which is set when this is the first time the server is sending this
+                data on this connection. Clients can use this flag to replace or update
+                their local state. When there is an update, servers MUST omit this flag
+                entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+                absence of this flag means 'false'.
+            unstable_expanded_timeline: Flag which is set if we're returning more historic
+                events due to the timeline limit having increased. See "XXX: Odd behavior"
+                comment in `synapse.handlers.sliding_sync`.
+            required_state: The current state of the room
+            timeline: Latest events in the room. The last event is the most recent.
+            bundled_aggregations: A mapping of event ID to the bundled aggregations for
+                the timeline events above. This allows clients to show accurate reaction
+                counts (or edits, threads), even if some of the reaction events were skipped
+                over in a gappy sync.
+            stripped_state: Stripped state events (for rooms where the user is
+                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
+                absent on joined/left rooms
+            prev_batch: A token that can be passed as a start parameter to the
+                `/rooms/<room_id>/messages` API to retrieve earlier messages.
+            limited: True if there are more events than `timeline_limit` looking
+                backwards from the `response.pos` to the `request.pos`.
+            num_live: The number of timeline events which have just occurred and are not historical.
+                The last N events are 'live' and should be treated as such. This is mostly
+                useful to determine whether a given @mention event should make a noise or not.
+                Clients cannot rely solely on the absence of `initial: true` to determine live
+                events because if a room not in the sliding window bumps into the window because
+                of an @mention it will have `initial: true` yet contain a single live event
+                (with potentially other old events in the timeline).
+            bump_stamp: The `stream_ordering` of the last event according to the
+                `bump_event_types`. This helps clients sort more readily without them
+                needing to pull in a bunch of the timeline to determine the last activity.
+                `bump_event_types` is a thing because for example, we don't want display
+                name changes to mark the room as unread and bump it to the top. For
+                encrypted rooms, we just have to consider any activity as a bump because we
+                can't see the content and the client has to figure it out for themselves.
+            joined_count: The number of users with membership of join, including the client's
+                own user ID. (same as sync v2 `m.joined_member_count`)
+            invited_count: The number of users with membership of invite. (same as sync v2
+                `m.invited_member_count`)
+            notification_count: The total number of unread notifications for this room. (same
+                as sync v2)
+            highlight_count: The number of unread notifications for this room with the highlight
+                flag set.
(same as sync v2) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class StrippedHero: + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] + + name: Optional[str] + avatar: Optional[str] + heroes: Optional[List[StrippedHero]] + is_dm: bool + initial: bool + unstable_expanded_timeline: bool + # Should be empty for invite/knock rooms with `stripped_state` + required_state: List[EventBase] + # Should be empty for invite/knock rooms with `stripped_state` + timeline_events: List[EventBase] + bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] + # Optional because it's only relevant to invite/knock rooms + stripped_state: List[JsonDict] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + prev_batch: Optional[StreamToken] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + limited: Optional[bool] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + num_live: Optional[int] + bump_stamp: int + joined_count: int + invited_count: int + notification_count: int + highlight_count: int + + def __bool__(self) -> bool: + return ( + # If this is the first time the client is seeing the room, we should not filter it out + # under any circumstance. + self.initial + # We need to let the client know if there are any new events + or bool(self.required_state) + or bool(self.timeline_events) + or bool(self.stripped_state) + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class SlidingWindowList: + """ + Attributes: + count: The total number of entries in the list. Always present if this list + is. + ops: The sliding list operations to perform. + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Operation: + """ + Attributes: + op: The operation type to perform. + range: Which index positions are affected by this operation. These are + both inclusive. + room_ids: Which room IDs are affected by this operation. These IDs match + up to the positions in the `range`, so the last room ID in this list + matches the 9th index. The room data is held in a separate object. + """ + + op: OperationType + range: Tuple[int, int] + room_ids: List[str] + + count: int + ops: List[Operation] + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Extensions: + """Responses for extensions + + Attributes: + to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ToDeviceExtension: + """The to-device extension (MSC3885) + + Attributes: + next_batch: The to-device stream token the client should use + to get more results + events: A list of to-device messages for the client + """ + + next_batch: str + events: Sequence[JsonMapping] + + def __bool__(self) -> bool: + return bool(self.events) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. 
+ device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. + """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class AccountDataExtension: + """The Account Data extension (MSC3959) + + Attributes: + global_account_data_map: Mapping from `type` to `content` of global account + data events. + account_data_by_room_map: Mapping from room_id to mapping of `type` to + `content` of room account data events. + """ + + global_account_data_map: Mapping[str, JsonMapping] + account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] + + def __bool__(self) -> bool: + return bool( + self.global_account_data_map or self.account_data_by_room_map + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ReceiptsExtension: + """The Receipts extension (MSC3960) + + Attributes: + room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral + event (type, content) + """ + + room_id_to_receipt_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_receipt_map) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TypingExtension: + """The Typing Notification extension (MSC3961) + + Attributes: + room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral + event (type, content) + """ + + room_id_to_typing_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_typing_map) + + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None + account_data: Optional[AccountDataExtension] = None + receipts: Optional[ReceiptsExtension] = None + typing: Optional[TypingExtension] = None + + def __bool__(self) -> bool: + return bool( + self.to_device + or self.e2ee + or self.account_data + or self.receipts + or self.typing + ) + + next_pos: SlidingSyncStreamToken + lists: Mapping[str, SlidingWindowList] + rooms: Dict[str, RoomResult] + extensions: Extensions + + def __bool__(self) -> bool: + """Make the result appear empty if there are no updates. This is used + to tell if the notifier needs to wait for more events when polling for + events. + """ + # We don't include `self.lists` here, as a) `lists` is always non-empty even if + # there are no changes, and b) since we're sorting rooms by `stream_ordering` of + # the latest activity, anything that would cause the order to change would end + # up in `self.rooms` and cause us to send down the change. 
+ return bool(self.rooms or self.extensions) + + @staticmethod + def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": + "Return a new empty result" + return SlidingSyncResult( + next_pos=next_pos, + lists={}, + rooms={}, + extensions=SlidingSyncResult.Extensions(), + ) + + class StateValues: """ Understood values of the (type, state_key) tuple in `required_state`. @@ -60,7 +412,7 @@ class StateValues: # We can't freeze this class because we want to update it in place with the # de-duplicated data. -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, auto_attribs=True, frozen=True) class RoomSyncConfig: """ Holds the config for what data we should fetch for a room in the sync response. @@ -74,7 +426,7 @@ class RoomSyncConfig: """ timeline_limit: int - required_state_map: Dict[str, Set[str]] + required_state_map: Mapping[str, AbstractSet[str]] @classmethod def from_room_config( @@ -146,27 +498,22 @@ class RoomSyncConfig: required_state_map=required_state_map, ) - def deep_copy(self) -> "RoomSyncConfig": - required_state_map: Dict[str, Set[str]] = { - state_type: state_key_set.copy() - for state_type, state_key_set in self.required_state_map.items() - } - - return RoomSyncConfig( - timeline_limit=self.timeline_limit, - required_state_map=required_state_map, - ) - def combine_room_sync_config( self, other_room_sync_config: "RoomSyncConfig" - ) -> None: + ) -> "RoomSyncConfig": """ - Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the + Combine this `RoomSyncConfig` with another `RoomSyncConfig` and return the superset union of the two. """ + timeline_limit = self.timeline_limit + required_state_map = { + event_type: set(state_keys) + for event_type, state_keys in self.required_state_map.items() + } + # Take the highest timeline limit - if self.timeline_limit < other_room_sync_config.timeline_limit: - self.timeline_limit = other_room_sync_config.timeline_limit + if timeline_limit < other_room_sync_config.timeline_limit: + timeline_limit = other_room_sync_config.timeline_limit # Union the required state for ( @@ -175,14 +522,14 @@ class RoomSyncConfig: ) in other_room_sync_config.required_state_map.items(): # If we already have a wildcard for everything, we don't need to add # anything else - if StateValues.WILDCARD in self.required_state_map.get( + if StateValues.WILDCARD in required_state_map.get( StateValues.WILDCARD, set() ): break # If we already have a wildcard `state_key` for this `state_type`, we don't need # to add anything else - if StateValues.WILDCARD in self.required_state_map.get(state_type, set()): + if StateValues.WILDCARD in required_state_map.get(state_type, set()): continue # If we're getting wildcards for the `state_type` and `state_key`, that's @@ -191,16 +538,14 @@ class RoomSyncConfig: state_type == StateValues.WILDCARD and StateValues.WILDCARD in state_key_set ): - self.required_state_map = {state_type: {StateValues.WILDCARD}} + required_state_map = {state_type: {StateValues.WILDCARD}} # We can break, since we don't need to add anything else break for state_key in state_key_set: # If we already have a wildcard for this specific `state_key`, we don't need # to add it since the wildcard already covers it. 
- if state_key in self.required_state_map.get( - StateValues.WILDCARD, set() - ): + if state_key in required_state_map.get(StateValues.WILDCARD, set()): continue # If we're getting a wildcard for the `state_type`, get rid of any other @@ -211,7 +556,7 @@ class RoomSyncConfig: # Make a copy so we don't run into an error: `dictionary changed size # during iteration`, when we remove items for existing_state_type, existing_state_key_set in list( - self.required_state_map.items() + required_state_map.items() ): # Make a copy so we don't run into an error: `Set changed size during # iteration`, when we filter out and remove items @@ -221,19 +566,21 @@ class RoomSyncConfig: # If we've the left the `set()` empty, remove it from the map if existing_state_key_set == set(): - self.required_state_map.pop(existing_state_type, None) + required_state_map.pop(existing_state_type, None) # If we're getting a wildcard `state_key`, get rid of any other state_keys # for this `state_type` since the wildcard will cover it already. if state_key == StateValues.WILDCARD: - self.required_state_map[state_type] = {state_key} + required_state_map[state_type] = {state_key} break # Otherwise, just add it to the set else: - if self.required_state_map.get(state_type) is None: - self.required_state_map[state_type] = {state_key} + if required_state_map.get(state_type) is None: + required_state_map[state_type] = {state_key} else: - self.required_state_map[state_type].add(state_key) + required_state_map[state_type].add(state_key) + + return RoomSyncConfig(timeline_limit, required_state_map) def must_await_full_state( self, @@ -324,7 +671,7 @@ class HaveSentRoomFlag(Enum): LIVE = "live" -T = TypeVar("T") +T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken) @attr.s(auto_attribs=True, slots=True, frozen=True) @@ -439,7 +786,7 @@ class MutableRoomStatusMap(RoomStatusMap[T]): self._statuses[room_id] = HaveSentRoom.previously(from_token) -@attr.s(auto_attribs=True) +@attr.s(auto_attribs=True, frozen=True) class PerConnectionState: """The per-connection state. A snapshot of what we've sent down the connection before. diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 2cf2f2982f..2ef9f665f9 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,7 +18,6 @@ # # import logging -from copy import deepcopy from typing import Dict, List, Optional from unittest.mock import patch @@ -47,7 +46,7 @@ from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import JsonDict, StreamToken, UserID -from synapse.types.handlers import SlidingSyncConfig +from synapse.types.handlers.sliding_sync import SlidingSyncConfig from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -566,23 +565,11 @@ class RoomSyncConfigTestCase(TestCase): """ Combine A into B and B into A to make sure we get the same result. 
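        For example (an illustrative case, not an exhaustive list): combining
        a config whose required_state_map is {"m.room.member": {"@user:test"}}
        with one whose map is {"m.room.member": {"*"}} should yield
        {"m.room.member": {"*"}} in either order, since a wildcard state_key
        subsumes any specific state_key for that event type.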
""" - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) + combined_config = a.combine_room_sync_config(b) + self._assert_room_config_equal(combined_config, expected, "B into A") - # Combine B into A - room_sync_config_a.combine_room_sync_config(room_sync_config_b) - - self._assert_room_config_equal(room_sync_config_a, expected, "B into A") - - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) - - # Combine A into B - room_sync_config_b.combine_room_sync_config(room_sync_config_a) - - self._assert_room_config_equal(room_sync_config_b, expected, "A into B") + combined_config = a.combine_room_sync_config(b) + self._assert_room_config_equal(combined_config, expected, "A into B") class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): From b913aaa788ac7c7cc9d48fbf293412f0837113e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 16:26:58 +0100 Subject: [PATCH 180/332] Sliding sync: Store the per-connection state in the database. (#17599) Based on #17600 --------- Co-authored-by: Eric Eastwood --- changelog.d/17599.misc | 1 + synapse/app/generic_worker.py | 2 + synapse/handlers/sliding_sync/__init__.py | 9 +- synapse/handlers/sliding_sync/store.py | 140 ++--- synapse/storage/database.py | 43 ++ synapse/storage/databases/main/__init__.py | 2 + .../storage/databases/main/sliding_sync.py | 491 ++++++++++++++++++ synapse/storage/engines/_base.py | 5 + synapse/storage/engines/postgres.py | 7 + synapse/storage/engines/sqlite.py | 6 + synapse/storage/schema/__init__.py | 7 +- .../main/delta/87/02_per_connection_state.sql | 81 +++ synapse/types/handlers/sliding_sync.py | 6 + .../sliding_sync/test_rooms_required_state.py | 10 +- 14 files changed, 694 insertions(+), 116 deletions(-) create mode 100644 changelog.d/17599.misc create mode 100644 synapse/storage/databases/main/sliding_sync.py create mode 100644 synapse/storage/schema/main/delta/87/02_per_connection_state.sql diff --git a/changelog.d/17599.misc b/changelog.d/17599.misc new file mode 100644 index 0000000000..2f81356d12 --- /dev/null +++ b/changelog.d/17599.misc @@ -0,0 +1 @@ +Store sliding sync per-connection state in the database. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 53f1859256..18d294f2b2 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -98,6 +98,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore from synapse.storage.databases.main.signatures import SignatureWorkerStore +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.stream import StreamWorkerStore @@ -159,6 +160,7 @@ class GenericWorkerStore( SessionStore, TaskSchedulerWorkerStore, ExperimentalFeaturesStore, + SlidingSyncStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. 
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index d62d520abd..3133325809 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -100,7 +100,7 @@ class SlidingSyncHandler: self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.is_mine_id = hs.is_mine_id - self.connection_store = SlidingSyncConnectionStore() + self.connection_store = SlidingSyncConnectionStore(self.store) self.extensions = SlidingSyncExtensionHandler(hs) self.room_lists = SlidingSyncRoomLists(hs) @@ -221,16 +221,11 @@ class SlidingSyncHandler: # amount of time (more with round-trips and re-processing) in the end to # get everything again. previous_connection_state = ( - await self.connection_store.get_per_connection_state( + await self.connection_store.get_and_clear_connection_positions( sync_config, from_token ) ) - await self.connection_store.mark_token_seen( - sync_config=sync_config, - from_token=from_token, - ) - # Get all of the room IDs that the user should be able to see in the sync # response has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index e38fe3556f..d24fccf76f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -13,12 +13,12 @@ # import logging -from typing import TYPE_CHECKING, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Optional import attr -from synapse.api.errors import SlidingSyncUnknownPosition from synapse.logging.opentracing import trace +from synapse.storage.databases.main import DataStore from synapse.types import SlidingSyncStreamToken from synapse.types.handlers.sliding_sync import ( MutablePerConnectionState, @@ -61,22 +61,9 @@ class SlidingSyncConnectionStore: to mapping of room ID to `HaveSentRoom`. 
""" - # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState` - _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory( - dict - ) + store: "DataStore" - async def is_valid_token( - self, sync_config: SlidingSyncConfig, connection_token: int - ) -> bool: - """Return whether the connection token is valid/recognized""" - if connection_token == 0: - return True - - conn_key = self._get_connection_key(sync_config) - return connection_token in self._connections.get(conn_key, {}) - - async def get_per_connection_state( + async def get_and_clear_connection_positions( self, sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], @@ -86,23 +73,21 @@ class SlidingSyncConnectionStore: Raises: SlidingSyncUnknownPosition if the connection_token is unknown """ - if from_token is None: + # If this is our first request, there is no previous connection state to fetch out of the database + if from_token is None or from_token.connection_position == 0: return PerConnectionState() - connection_position = from_token.connection_position - if connection_position == 0: - # Initial sync (request without a `from_token`) starts at `0` so - # there is no existing per-connection state - return PerConnectionState() + conn_id = sync_config.conn_id or "" - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.get(conn_key, {}) - connection_state = sync_statuses.get(connection_position) + device_id = sync_config.requester.device_id + assert device_id is not None - if connection_state is None: - raise SlidingSyncUnknownPosition() - - return connection_state + return await self.store.get_and_clear_connection_positions( + sync_config.user.to_string(), + device_id, + conn_id, + from_token.connection_position, + ) @trace async def record_new_state( @@ -116,85 +101,28 @@ class SlidingSyncConnectionStore: If there are no changes to the state this may return the same token as the existing per-connection state. """ - prev_connection_token = 0 - if from_token is not None: - prev_connection_token = from_token.connection_position - if not new_connection_state.has_updates(): - return prev_connection_token + if from_token is not None: + return from_token.connection_position + else: + return 0 - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) + # A from token with a zero connection position means there was no + # previously stored connection state, so we treat a zero the same as + # there being no previous position. + previous_connection_position = None + if from_token is not None and from_token.connection_position != 0: + previous_connection_position = from_token.connection_position - # Generate a new token, removing any existing entries in that token - # (which can happen if requests get resent). - new_store_token = prev_connection_token + 1 - sync_statuses.pop(new_store_token, None) - - # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s - # don't grow forever. - sync_statuses[new_store_token] = new_connection_state.copy() - - return new_store_token - - @trace - async def mark_token_seen( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - ) -> None: - """We have received a request with the given token, so we can clear out - any other tokens associated with the connection. - - If there is no from token then we have started afresh, and so we delete - all tokens associated with the device. 
- """ - # Clear out any tokens for the connection that doesn't match the one - # from the request. - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.pop(conn_key, {}) - if from_token is None: - return - - sync_statuses = { - connection_token: room_statuses - for connection_token, room_statuses in sync_statuses.items() - if connection_token == from_token.connection_position - } - if sync_statuses: - self._connections[conn_key] = sync_statuses - - @staticmethod - def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: - """Return a unique identifier for this connection. - - The first part is simply the user ID. - - The second part is generally a combination of device ID and conn_id. - However, both these two are optional (e.g. puppet access tokens don't - have device IDs), so this handles those edge cases. - - We use this over the raw `conn_id` to avoid clashes between different - clients that use the same `conn_id`. Imagine a user uses a web client - that uses `conn_id: main_sync_loop` and an Android client that also has - a `conn_id: main_sync_loop`. - """ - - user_id = sync_config.user.to_string() - - # Only one sliding sync connection is allowed per given conn_id (empty - # or not). conn_id = sync_config.conn_id or "" - if sync_config.requester.device_id: - return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}") + device_id = sync_config.requester.device_id + assert device_id is not None - if sync_config.requester.access_token_id: - # If we don't have a device, then the access token ID should be a - # stable ID. - return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}") - - # If we have neither then its likely an AS or some weird token. Either - # way we can just fail here. - raise Exception("Cannot use sliding sync with access token type") + return await self.store.persist_per_connection_state( + sync_config.user.to_string(), + device_id, + conn_id, + previous_connection_position, + new_connection_state, + ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 569f618193..17ff02f847 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -64,6 +64,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor, SQLQueryParameters +from synapse.types import StrCollection from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter @@ -1095,6 +1096,48 @@ class DatabasePool: txn.execute(sql, vals) + @staticmethod + def simple_insert_returning_txn( + txn: LoggingTransaction, + table: str, + values: Dict[str, Any], + returning: StrCollection, + ) -> Tuple[Any, ...]: + """Executes a `INSERT INTO... RETURNING...` statement (or equivalent for + SQLite versions that don't support it). + """ + + if txn.database_engine.supports_returning: + sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % ( + table, + ", ".join(k for k in values.keys()), + ", ".join("?" 
for _ in values.keys()), + ", ".join(k for k in returning), + ) + + txn.execute(sql, list(values.values())) + row = txn.fetchone() + assert row is not None + return row + else: + # For old versions of SQLite we do a standard insert and then can + # use `last_insert_rowid` to get at the row we just inserted + DatabasePool.simple_insert_txn( + txn, + table=table, + values=values, + ) + txn.execute("SELECT last_insert_rowid()") + row = txn.fetchone() + assert row is not None + (rowid,) = row + + row = DatabasePool.simple_select_one_txn( + txn, table=table, keyvalues={"rowid": rowid}, retcols=returning + ) + assert row is not None + return row + async def simple_insert_many( self, table: str, diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 586e84f2a4..9a43ab63e8 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -33,6 +33,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor @@ -156,6 +157,7 @@ class DataStore( LockStore, SessionStore, TaskSchedulerWorkerStore, + SlidingSyncStore, ): def __init__( self, diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py new file mode 100644 index 0000000000..dc747d7ac0 --- /dev/null +++ b/synapse/storage/databases/main/sliding_sync.py @@ -0,0 +1,491 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + + +import logging +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast + +import attr + +from synapse.api.errors import SlidingSyncUnknownPosition +from synapse.logging.opentracing import log_kv +from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import LoggingTransaction +from synapse.types import MultiWriterStreamToken, RoomStreamToken +from synapse.types.handlers.sliding_sync import ( + HaveSentRoom, + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomStatusMap, + RoomSyncConfig, +) +from synapse.util import json_encoder +from synapse.util.caches.descriptors import cached + +if TYPE_CHECKING: + from synapse.storage.databases.main import DataStore + +logger = logging.getLogger(__name__) + + +class SlidingSyncStore(SQLBaseStore): + async def persist_per_connection_state( + self, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "MutablePerConnectionState", + ) -> int: + """Persist updates to the per-connection state for a sliding sync + connection. + + Returns: + The connection position of the newly persisted state. + """ + + # This cast is safe because the downstream code only cares about + # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed + # alongside `SlidingSyncStore` wherever we create a store. 
+ store = cast("DataStore", self) + + return await self.db_pool.runInteraction( + "persist_per_connection_state", + self.persist_per_connection_state_txn, + user_id=user_id, + device_id=device_id, + conn_id=conn_id, + previous_connection_position=previous_connection_position, + per_connection_state=await PerConnectionStateDB.from_state( + per_connection_state, store + ), + ) + + def persist_per_connection_state_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "PerConnectionStateDB", + ) -> int: + # First we fetch (or create) the connection key associated with the + # previous connection position. + if previous_connection_position is not None: + # The `previous_connection_position` is a user-supplied value, so we + # need to make sure that the one they supplied is actually theirs. + sql = """ + SELECT connection_key + FROM sliding_sync_connection_positions + INNER JOIN sliding_sync_connections USING (connection_key) + WHERE + connection_position = ? + AND user_id = ? AND effective_device_id = ? AND conn_id = ? + """ + txn.execute( + sql, (previous_connection_position, user_id, device_id, conn_id) + ) + row = txn.fetchone() + if row is None: + raise SlidingSyncUnknownPosition() + + (connection_key,) = row + else: + # We're restarting the connection, so we clear the previous existing data we + # used to track it. We do this here to ensure that if we get lots of + # one-shot requests we don't stack up lots of entries. We have `ON DELETE + # CASCADE` setup on the dependent tables so this will clear out all the + # associated data. + self.db_pool.simple_delete_txn( + txn, + table="sliding_sync_connections", + keyvalues={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + }, + ) + + (connection_key,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connections", + values={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_key",), + ) + + # Define a new connection position for the updates + (connection_position,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_positions", + values={ + "connection_key": connection_key, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_position",), + ) + + # We need to deduplicate the `required_state` JSON. We do this by + # fetching all JSON associated with the connection and comparing that + # with the updates to `required_state` + + # Dict from required state json -> required state ID + required_state_to_id: Dict[str, int] = {} + if previous_connection_position is not None: + rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_required_state", + keyvalues={"connection_key": connection_key}, + retcols=("required_state_id", "required_state"), + ) + for required_state_id, required_state in rows: + required_state_to_id[required_state] = required_state_id + + room_to_state_ids: Dict[str, int] = {} + unique_required_state: Dict[str, List[str]] = {} + for room_id, room_state in per_connection_state.room_configs.items(): + serialized_state = json_encoder.encode( + # We store the required state as a sorted list of event type / + # state key tuples. 
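+                # e.g. '[["m.room.member", "@user:test"], ["m.room.name", ""]]'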
+ sorted( + (event_type, state_key) + for event_type, state_keys in room_state.required_state_map.items() + for state_key in state_keys + ) + ) + + existing_state_id = required_state_to_id.get(serialized_state) + if existing_state_id is not None: + room_to_state_ids[room_id] = existing_state_id + else: + unique_required_state.setdefault(serialized_state, []).append(room_id) + + # Insert any new `required_state` json we haven't previously seen. + for serialized_required_state, room_ids in unique_required_state.items(): + (required_state_id,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_required_state", + values={ + "connection_key": connection_key, + "required_state": serialized_required_state, + }, + returning=("required_state_id",), + ) + for room_id in room_ids: + room_to_state_ids[room_id] = required_state_id + + # Copy over state from the previous connection position (we'll overwrite + # these rows with any changes). + if previous_connection_position is not None: + sql = """ + INSERT INTO sliding_sync_connection_streams + (connection_position, stream, room_id, room_status, last_token) + SELECT ?, stream, room_id, room_status, last_token + FROM sliding_sync_connection_streams + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + sql = """ + INSERT INTO sliding_sync_connection_room_configs + (connection_position, room_id, timeline_limit, required_state_id) + SELECT ?, room_id, timeline_limit, required_state_id + FROM sliding_sync_connection_room_configs + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + # We now upsert the changes to the various streams. + key_values = [] + value_values = [] + for room_id, have_sent_room in per_connection_state.rooms._statuses.items(): + key_values.append((connection_position, "rooms", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + for room_id, have_sent_room in per_connection_state.receipts._statuses.items(): + key_values.append((connection_position, "receipts", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + self.db_pool.simple_upsert_many_txn( + txn, + table="sliding_sync_connection_streams", + key_names=( + "connection_position", + "stream", + "room_id", + ), + key_values=key_values, + value_names=( + "room_status", + "last_token", + ), + value_values=value_values, + ) + + # ... and upsert changes to the room configs. 
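+        # (The upsert below is keyed on (connection_position, room_id), which
+        # matches the unique index on that table.)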
+ keys = [] + values = [] + for room_id, room_config in per_connection_state.room_configs.items(): + keys.append((connection_position, room_id)) + values.append((room_config.timeline_limit, room_to_state_ids[room_id])) + + self.db_pool.simple_upsert_many_txn( + txn, + table="sliding_sync_connection_room_configs", + key_names=( + "connection_position", + "room_id", + ), + key_values=keys, + value_names=( + "timeline_limit", + "required_state_id", + ), + value_values=values, + ) + + return connection_position + + @cached(iterable=True, max_entries=100000) + async def get_and_clear_connection_positions( + self, user_id: str, device_id: str, conn_id: str, connection_position: int + ) -> "PerConnectionState": + """Get the per-connection state for the given connection position.""" + + per_connection_state_db = await self.db_pool.runInteraction( + "get_and_clear_connection_positions", + self._get_and_clear_connection_positions_txn, + user_id=user_id, + device_id=device_id, + conn_id=conn_id, + connection_position=connection_position, + ) + + # This cast is safe because the downstream code only cares about + # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed + # alongside `SlidingSyncStore` wherever we create a store. + store = cast("DataStore", self) + + return await per_connection_state_db.to_state(store) + + def _get_and_clear_connection_positions_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + conn_id: str, + connection_position: int, + ) -> "PerConnectionStateDB": + # The `previous_connection_position` is a user-supplied value, so we + # need to make sure that the one they supplied is actually theirs. + sql = """ + SELECT connection_key + FROM sliding_sync_connection_positions + INNER JOIN sliding_sync_connections USING (connection_key) + WHERE + connection_position = ? + AND user_id = ? AND effective_device_id = ? AND conn_id = ? + """ + txn.execute(sql, (connection_position, user_id, device_id, conn_id)) + row = txn.fetchone() + if row is None: + raise SlidingSyncUnknownPosition() + + (connection_key,) = row + + # Now that we have seen the client has received and used the connection + # position, we can delete all the other connection positions. + sql = """ + DELETE FROM sliding_sync_connection_positions + WHERE connection_key = ? AND connection_position != ? + """ + txn.execute(sql, (connection_key, connection_position)) + + # Fetch and create a mapping from required state ID to the actual + # required state for the connection. + rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_required_state", + keyvalues={"connection_key": connection_key}, + retcols=( + "required_state_id", + "required_state", + ), + ) + + required_state_map: Dict[int, Dict[str, Set[str]]] = {} + for row in rows: + state = required_state_map[row[0]] = {} + for event_type, state_keys in db_to_json(row[1]): + state[event_type] = set(state_keys) + + # Get all the room configs, looking up the required state from the map + # above. 
+        room_config_rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_room_configs",
+            keyvalues={"connection_position": connection_position},
+            retcols=(
+                "room_id",
+                "timeline_limit",
+                "required_state_id",
+            ),
+        )
+
+        room_configs: Dict[str, RoomSyncConfig] = {}
+        for (
+            room_id,
+            timeline_limit,
+            required_state_id,
+        ) in room_config_rows:
+            room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=timeline_limit,
+                required_state_map=required_state_map[required_state_id],
+            )
+
+        # Now look up the per-room stream data.
+        rooms: Dict[str, HaveSentRoom[str]] = {}
+        receipts: Dict[str, HaveSentRoom[str]] = {}
+
+        receipt_rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_streams",
+            keyvalues={"connection_position": connection_position},
+            retcols=(
+                "stream",
+                "room_id",
+                "room_status",
+                "last_token",
+            ),
+        )
+        for stream, room_id, room_status, last_token in receipt_rows:
+            have_sent_room: HaveSentRoom[str] = HaveSentRoom(
+                status=HaveSentRoomFlag(room_status), last_token=last_token
+            )
+            if stream == "rooms":
+                rooms[room_id] = have_sent_room
+            elif stream == "receipts":
+                receipts[room_id] = have_sent_room
+            else:
+                # For forwards compatibility we ignore unknown streams, as in
+                # future we want to be able to easily add more stream types.
+                logger.warning("Unrecognized sliding sync stream in DB %r", stream)
+
+        return PerConnectionStateDB(
+            rooms=RoomStatusMap(rooms),
+            receipts=RoomStatusMap(receipts),
+            room_configs=room_configs,
+        )
+
+
+@attr.s(auto_attribs=True, frozen=True)
+class PerConnectionStateDB:
+    """An equivalent to `PerConnectionState` that holds data in a format stored
+    in the DB.
+
+    The principal difference is that the tokens for the different streams are
+    serialized to strings.
+
+    When persisting this *only* contains updates to the state.
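+
+    `from_state` serializes the stream tokens to strings, and `to_state`
+    parses them back into `RoomStreamToken` / `MultiWriterStreamToken`.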
+ """ + + rooms: "RoomStatusMap[str]" + receipts: "RoomStatusMap[str]" + + room_configs: Mapping[str, "RoomSyncConfig"] + + @staticmethod + async def from_state( + per_connection_state: "MutablePerConnectionState", store: "DataStore" + ) -> "PerConnectionStateDB": + """Convert from a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.rooms.get_updates().items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.receipts.get_updates().items() + } + + log_kv( + { + "rooms": rooms, + "receipts": receipts, + "room_configs": per_connection_state.room_configs.maps[0], + } + ) + + return PerConnectionStateDB( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + room_configs=per_connection_state.room_configs.maps[0], + ) + + async def to_state(self, store: "DataStore") -> "PerConnectionState": + """Convert into a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await RoomStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.rooms._statuses.items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await MultiWriterStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.receipts._statuses.items() + } + + return PerConnectionState( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + room_configs=self.room_configs, + ) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index ad222e7e2d..9d82c59384 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -28,6 +28,11 @@ if TYPE_CHECKING: from synapse.storage.database import LoggingDatabaseConnection +# A string that will be replaced with the appropriate auto increment directive +# for the database engine, expands to an auto incrementing integer primary key. +AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$" + + class IsolationLevel(IntEnum): READ_COMMITTED: int = 1 REPEATABLE_READ: int = 2 diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 90641d5a18..8c8c6d0414 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast import psycopg2.extensions from synapse.storage.engines._base import ( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, BaseDatabaseEngine, IncorrectDatabaseSetup, IsolationLevel, @@ -256,4 +257,10 @@ class PostgresEngine( executing the script in its own transaction. The script transaction is left open and it is the responsibility of the caller to commit it. 
""" + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, + "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY", + ) + cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index b11094c5c1..9d1795ebe5 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -25,6 +25,7 @@ import threading from typing import TYPE_CHECKING, Any, List, Mapping, Optional from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER from synapse.storage.types import Cursor if TYPE_CHECKING: @@ -168,6 +169,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): > first. No other implicit transaction control is performed; any transaction > control must be added to sql_script. """ + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT" + ) + # The implementation of `executescript` can be found at # https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035. cursor.executescript(f"BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 581d00346b..3929fd235c 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 86 # remember to update the list below when updating +SCHEMA_VERSION = 87 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -142,6 +142,11 @@ Changes in SCHEMA_VERSION = 85 Changes in SCHEMA_VERSION = 86 - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache` + +Changes in SCHEMA_VERSION = 87 + - Add tables for storing the per-connection state for sliding sync requests: + sliding_sync_connections, sliding_sync_connection_positions, sliding_sync_connection_required_state, + sliding_sync_connection_room_configs, sliding_sync_connection_streams """ diff --git a/synapse/storage/schema/main/delta/87/02_per_connection_state.sql b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql new file mode 100644 index 0000000000..59bc14a2c9 --- /dev/null +++ b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql @@ -0,0 +1,81 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + + +-- Table to track active sliding sync connections. +-- +-- A new connection will be created for every sliding sync request without a +-- `since` token for a given `conn_id` for a device.# +-- +-- Once a new connection is created and used we delete all other connections for +-- the `conn_id`. 
+CREATE TABLE sliding_sync_connections( + connection_key $%AUTO_INCREMENT_PRIMARY_KEY%$, + user_id TEXT NOT NULL, + -- Generally the device ID, but may be something else for e.g. puppeted accounts. + effective_device_id TEXT NOT NULL, + conn_id TEXT NOT NULL, + created_ts BIGINT NOT NULL +); + +CREATE INDEX sliding_sync_connections_idx ON sliding_sync_connections(user_id, effective_device_id, conn_id); +CREATE INDEX sliding_sync_connections_ts_idx ON sliding_sync_connections(created_ts); + +-- We track per-connection state by associating changes to the state with +-- connection positions. This ensures that we correctly track state even if we +-- see retries of requests. +-- +-- If the client starts a "new" connection (by not specifying a since token), +-- we'll clear out the other connections (to ensure that we don't end up with +-- lots of connection keys). +CREATE TABLE sliding_sync_connection_positions( + connection_position $%AUTO_INCREMENT_PRIMARY_KEY%$, + connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE, + created_ts BIGINT NOT NULL +); + +CREATE INDEX sliding_sync_connection_positions_key ON sliding_sync_connection_positions(connection_key); +CREATE INDEX sliding_sync_connection_positions_ts_idx ON sliding_sync_connection_positions(created_ts); + + +-- To save space we deduplicate the `required_state` json by assigning IDs to +-- different values. +CREATE TABLE sliding_sync_connection_required_state( + required_state_id $%AUTO_INCREMENT_PRIMARY_KEY%$, + connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE, + required_state TEXT NOT NULL -- We store this as a json list of event type / state key tuples. +); + +CREATE INDEX sliding_sync_connection_required_state_conn_pos ON sliding_sync_connection_required_state(connection_key); + + +-- Stores the room configs we have seen for rooms in a connection. +CREATE TABLE sliding_sync_connection_room_configs( + connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE, + room_id TEXT NOT NULL, + timeline_limit BIGINT NOT NULL, + required_state_id BIGINT NOT NULL REFERENCES sliding_sync_connection_required_state(required_state_id) +); + +CREATE UNIQUE INDEX sliding_sync_connection_room_configs_idx ON sliding_sync_connection_room_configs(connection_position, room_id); + +-- Stores what data we have sent for given streams down given connections. +CREATE TABLE sliding_sync_connection_streams( + connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE, + stream TEXT NOT NULL, -- e.g. "events" or "receipts" + room_id TEXT NOT NULL, + room_status TEXT NOT NULL, -- "live" or "previously", i.e. the `HaveSentRoomFlag` value + last_token TEXT -- For "previously" the token for the stream we have sent up to. 
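+    -- (This is NULL while the room is still "live".)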
+); + +CREATE UNIQUE INDEX sliding_sync_connection_streams_idx ON sliding_sync_connection_streams(connection_position, room_id, stream); diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index bca1ff7b54..84a88bf784 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -730,6 +730,9 @@ class RoomStatusMap(Generic[T]): return RoomStatusMap(statuses=dict(self._statuses)) + def __len__(self) -> int: + return len(self._statuses) + class MutableRoomStatusMap(RoomStatusMap[T]): """A mutable version of `RoomStatusMap`""" @@ -831,6 +834,9 @@ class PerConnectionState: room_configs=dict(self.room_configs), ) + def __len__(self) -> int: + return len(self.rooms) + len(self.receipts) + len(self.room_configs) + @attr.s(auto_attribs=True) class MutablePerConnectionState(PerConnectionState): diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 823e7db569..498c921cbd 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -191,8 +191,14 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): } _, from_token = self.do_sync(sync_body, tok=user1_tok) - # Reset the in-memory cache - self.hs.get_sliding_sync_handler().connection_store._connections.clear() + # Reset the positions + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_connections", + keyvalues={"user_id": user1_id}, + desc="clear_sliding_sync_connections_cache", + ) + ) # Make the Sliding Sync request channel = self.make_request( From 53a3783750789d2627ff6e57857f07216bef9449 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Fri, 30 Aug 2024 13:52:57 +0100 Subject: [PATCH 181/332] Use custom stage UIA error for MAS cross-signing reset (#17509) Rather than 501 M_UNRECOGNISED Client side implementation at https://github.com/matrix-org/matrix-react-sdk/pull/12892/ --- changelog.d/17509.feature | 1 + synapse/rest/client/auth.py | 13 ++++++++++- synapse/rest/client/keys.py | 30 +++++++++++++++++++------ tests/handlers/test_oauth_delegation.py | 2 +- tests/rest/client/test_keys.py | 12 +++------- 5 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17509.feature diff --git a/changelog.d/17509.feature b/changelog.d/17509.feature new file mode 100644 index 0000000000..6d639ceb98 --- /dev/null +++ b/changelog.d/17509.feature @@ -0,0 +1 @@ +Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. 
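With the `synapse/rest/client/auth.py` change below, the standard UIA web
fallback for the new stage is answered with a redirect to the MAS account
management page. A minimal sketch of the URL a client opens (the homeserver
base URL and session ID are illustrative placeholders, not values from this
PR):

```python
from urllib.parse import urlencode

# Hedged sketch: the UIA web fallback URL for the new custom stage.
base_url = "https://synapse.example.com"  # placeholder homeserver
stage = "org.matrix.cross_signing_reset"
fallback_url = (
    f"{base_url}/_matrix/client/v3/auth/{stage}/fallback/web?"
    + urlencode({"session": "dummy"})
)
# With MSC3861 enabled, Synapse answers a GET on this URL with a redirect to
# e.g. https://mas.example.com/account/?action=org.matrix.cross_signing_reset
print(fallback_url)
```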
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 4221f35937..32eeecd662 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -27,7 +27,7 @@ from twisted.web.server import Request from synapse.api.constants import LoginType from synapse.api.errors import LoginError, SynapseError from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.server import HttpServer, respond_with_html +from synapse.http.server import HttpServer, respond_with_html, respond_with_redirect from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -66,6 +66,17 @@ class AuthRestServlet(RestServlet): if not session: raise SynapseError(400, "No session supplied") + if ( + self.hs.config.experimental.msc3861.enabled + and stagetype == "org.matrix.cross_signing_reset" + ): + config = self.hs.config.experimental.msc3861 + if config.account_management_url is not None: + url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + else: + url = config.issuer + respond_with_redirect(request, str.encode(url)) + if stagetype == LoginType.RECAPTCHA: html = self.recaptcha_template.render( session=session, diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index eddad7d5b8..a33eb6c1f2 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,10 +23,13 @@ import logging import re from collections import Counter -from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple -from synapse.api.errors import Codes, InvalidAPICallError, SynapseError +from synapse.api.errors import ( + InteractiveAuthIncompleteError, + InvalidAPICallError, + SynapseError, +) from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -409,11 +412,24 @@ class SigningKeyUploadServlet(RestServlet): else: url = config.issuer - raise SynapseError( - HTTPStatus.NOT_IMPLEMENTED, - "To reset your end-to-end encryption cross-signing identity, " - f"you first need to approve it at {url} and then try again.", - Codes.UNRECOGNIZED, + # We use a dummy session ID as this isn't really a UIA flow, but we + # reuse the same API shape for better client compatibility. + raise InteractiveAuthIncompleteError( + "dummy", + { + "session": "dummy", + "flows": [ + {"stages": ["org.matrix.cross_signing_reset"]}, + ], + "params": { + "org.matrix.cross_signing_reset": { + "url": url, + }, + }, + "msg": "To reset your end-to-end encryption cross-signing " + f"identity, you first need to approve it at {url} and " + "then try again.", + }, ) else: # Without MSC3861, we require UIA. 
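Clients that previously special-cased the `501 M_UNRECOGNISED` response now
receive an ordinary UIA challenge from the servlet above. A rough sketch of
handling it (the homeserver URL, access token, and key payload are
placeholders; only the endpoint and stage name come from this PR):

```python
import requests

BASE = "https://synapse.example.com"  # placeholder homeserver
HEADERS = {"Authorization": "Bearer <access_token>"}  # placeholder token

resp = requests.post(
    f"{BASE}/_matrix/client/v3/keys/device_signing/upload",
    headers=HEADERS,
    json={"master_key": {}},  # elided key payload
)
if resp.status_code == 401:
    body = resp.json()
    stage = "org.matrix.cross_signing_reset"
    if any(stage in flow.get("stages", []) for flow in body.get("flows", [])):
        # Send the user to approve the reset at the URL advertised in the
        # stage params, then retry the upload.
        print("Approve at:", body["params"][stage]["url"])
```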
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 036c539db2..5b5dc713d1 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -550,7 +550,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): access_token="mockAccessToken", ) - self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) def expect_unauthorized( self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index 8bbd109092..d9a210b616 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -315,9 +315,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key2, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) # Pretend that MAS did UIA and allowed us to replace the master key. channel = self.make_request( @@ -349,9 +347,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key3, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) # Pretend that MAS did UIA and allowed us to replace the master key. channel = self.make_request( @@ -376,6 +372,4 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key3, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) From 48303fcbccd50798168940f303364f6b0d25415d Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 30 Aug 2024 16:04:08 +0200 Subject: [PATCH 182/332] MSC3861: load the issuer and account management URLs from OIDC discovery (#17407) This will help mitigating any discrepancies between the issuer configured and the one returned by the OIDC provider. This also removes the need for configuring the `account_management_url` explicitely, as it will now be loaded from the OIDC discovery, as per MSC2965. Because we may now fetch stuff for the .well-known/matrix/client endpoint, this also transforms the client well-known resource to be asynchronous. --- changelog.d/17407.misc | 1 + synapse/api/auth/msc3861_delegated.py | 33 ++++++++++++++++++++++-- synapse/rest/client/auth.py | 16 ++++++++---- synapse/rest/client/auth_issuer.py | 10 ++++++-- synapse/rest/client/keys.py | 16 ++++++++---- synapse/rest/client/login.py | 2 +- synapse/rest/well_known.py | 37 +++++++++++++++------------ tests/rest/client/test_auth_issuer.py | 20 ++++++++++++++- tests/rest/test_well_known.py | 37 +++++++++++++++++---------- 9 files changed, 126 insertions(+), 46 deletions(-) create mode 100644 changelog.d/17407.misc diff --git a/changelog.d/17407.misc b/changelog.d/17407.misc new file mode 100644 index 0000000000..9ed6e61a5b --- /dev/null +++ b/changelog.d/17407.misc @@ -0,0 +1 @@ +MSC3861: load the issuer and account management URLs from OIDC discovery. 
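The discovery lookup itself is a single fetch of the issuer's OpenID Connect
metadata, from which only two fields are used here. A sketch of the
equivalent request (the issuer URL is a placeholder, and this ignores
Synapse's proxied HTTP client and caching):

```python
import requests

issuer = "https://issuer.example.com"  # placeholder
metadata = requests.get(
    f"{issuer}/.well-known/openid-configuration", timeout=10
).json()

# The `issuer` in the metadata wins over the configured value; the account
# management URL is optional and may be missing from the metadata entirely.
effective_issuer = metadata.get("issuer", issuer)
account_management_url = metadata.get("account_management_uri")
```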
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 7361666c77..6bd845c7e3 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -121,7 +121,9 @@ class MSC3861DelegatedAuth(BaseAuth): self._hostname = hs.hostname self._admin_token = self._config.admin_token - self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata]( + self._load_metadata + ) if isinstance(auth_method, PrivateKeyJWTWithKid): # Use the JWK as the client secret when using the private_key_jwt method @@ -145,6 +147,33 @@ class MSC3861DelegatedAuth(BaseAuth): # metadata.validate_introspection_endpoint() return metadata + async def issuer(self) -> str: + """ + Get the configured issuer + + This will use the issuer value set in the metadata, + falling back to the one set in the config if not set in the metadata + """ + metadata = await self._issuer_metadata.get() + return metadata.issuer or self._config.issuer + + async def account_management_url(self) -> Optional[str]: + """ + Get the configured account management URL + + This will discover the account management URL from the issuer if it's not set in the config + """ + if self._config.account_management_url is not None: + return self._config.account_management_url + + try: + metadata = await self._issuer_metadata.get() + return metadata.get("account_management_uri", None) + # We don't want to raise here if we can't load the metadata + except Exception: + logger.warning("Failed to load metadata:", exc_info=True) + return None + async def _introspection_endpoint(self) -> str: """ Returns the introspection endpoint of the issuer @@ -154,7 +183,7 @@ class MSC3861DelegatedAuth(BaseAuth): if self._config.introspection_endpoint is not None: return self._config.introspection_endpoint - metadata = await self._load_metadata() + metadata = await self._issuer_metadata.get() return metadata.get("introspection_endpoint") async def _introspect_token(self, token: str) -> IntrospectionToken: diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 32eeecd662..b8dca7c797 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -20,7 +20,7 @@ # import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, cast from twisted.web.server import Request @@ -70,11 +70,17 @@ class AuthRestServlet(RestServlet): self.hs.config.experimental.msc3861.enabled and stagetype == "org.matrix.cross_signing_reset" ): - config = self.hs.config.experimental.msc3861 - if config.account_management_url is not None: - url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + url = await auth.account_management_url() + if url is not None: + url = f"{url}?action=org.matrix.cross_signing_reset" else: - url = config.issuer + url = await auth.issuer() respond_with_redirect(request, str.encode(url)) if stagetype == LoginType.RECAPTCHA: diff --git a/synapse/rest/client/auth_issuer.py b/synapse/rest/client/auth_issuer.py index 77b9720956..acd0399d85 100644 --- a/synapse/rest/client/auth_issuer.py +++ b/synapse/rest/client/auth_issuer.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging import typing -from typing import Tuple +from typing import Tuple, cast from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -43,10 +43,16 @@ class AuthIssuerServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self._config = hs.config + self._auth = hs.get_auth() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self._config.experimental.msc3861.enabled: - return 200, {"issuer": self._config.experimental.msc3861.issuer} + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + return 200, {"issuer": await auth.issuer()} else: # Wouldn't expect this to be reached: the servelet shouldn't have been # registered. Still, fail gracefully if we are registered for some reason. diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index a33eb6c1f2..7025662fdc 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -23,7 +23,7 @@ import logging import re from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast from synapse.api.errors import ( InteractiveAuthIncompleteError, @@ -406,11 +406,17 @@ class SigningKeyUploadServlet(RestServlet): # explicitly mark the master key as replaceable. if self.hs.config.experimental.msc3861.enabled: if not master_key_updatable_without_uia: - config = self.hs.config.experimental.msc3861 - if config.account_management_url is not None: - url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + uri = await auth.account_management_url() + if uri is not None: + url = f"{uri}?action=org.matrix.cross_signing_reset" else: - url = config.issuer + url = await auth.issuer() # We use a dummy session ID as this isn't really a UIA flow, but we # reuse the same API shape for better client compatibility. 
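Because the metadata call is wrapped in `RetryOnExceptionCachedCall`, the
network round-trip happens at most once per process on success (the tests
below assert exactly that). Stripped of the retry-on-failure behaviour, the
underlying memoisation pattern is roughly:

```python
import asyncio
from typing import Awaitable, Callable, Generic, Optional, TypeVar

T = TypeVar("T")


class CachedCall(Generic[T]):
    """Rough sketch: the first `get()` starts the coroutine, later callers
    await the same task. The real helper also drops the cached task when the
    call raises, so failures are retried on the next `get()`."""

    def __init__(self, fn: Callable[[], Awaitable[T]]) -> None:
        self._fn = fn
        self._task: Optional["asyncio.Task[T]"] = None

    async def get(self) -> T:
        if self._task is None:
            # The first caller kicks off the work; everyone else shares it.
            self._task = asyncio.ensure_future(self._fn())
        return await self._task
```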
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index ae691bcdba..03b1e7edc4 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -268,7 +268,7 @@ class LoginRestServlet(RestServlet): approval_notice_medium=ApprovalNoticeMedium.NONE, ) - well_known_data = self._well_known_builder.get_well_known() + well_known_data = await self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data return 200, result diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index d0ca8ca46b..989e570671 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -18,12 +18,13 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, Tuple, cast from twisted.web.resource import Resource from twisted.web.server import Request -from synapse.http.server import set_cors_headers +from synapse.api.errors import NotFoundError +from synapse.http.server import DirectServeJsonResource from synapse.http.site import SynapseRequest from synapse.types import JsonDict from synapse.util import json_encoder @@ -38,8 +39,9 @@ logger = logging.getLogger(__name__) class WellKnownBuilder: def __init__(self, hs: "HomeServer"): self._config = hs.config + self._auth = hs.get_auth() - def get_well_known(self) -> Optional[JsonDict]: + async def get_well_known(self) -> Optional[JsonDict]: if not self._config.server.serve_client_wellknown: return None @@ -52,13 +54,20 @@ class WellKnownBuilder: # We use the MSC3861 values as they are used by multiple MSCs if self._config.experimental.msc3861.enabled: + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + result["org.matrix.msc2965.authentication"] = { - "issuer": self._config.experimental.msc3861.issuer + "issuer": await auth.issuer(), } - if self._config.experimental.msc3861.account_management_url is not None: + account_management_url = await auth.account_management_url() + if account_management_url is not None: result["org.matrix.msc2965.authentication"][ "account" - ] = self._config.experimental.msc3861.account_management_url + ] = account_management_url if self._config.server.extra_well_known_client_content: for ( @@ -71,26 +80,22 @@ class WellKnownBuilder: return result -class ClientWellKnownResource(Resource): +class ClientWellKnownResource(DirectServeJsonResource): """A Twisted web resource which renders the .well-known/matrix/client file""" isLeaf = 1 def __init__(self, hs: "HomeServer"): - Resource.__init__(self) + super().__init__() self._well_known_builder = WellKnownBuilder(hs) - def render_GET(self, request: SynapseRequest) -> bytes: - set_cors_headers(request) - r = self._well_known_builder.get_well_known() + async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + r = await self._well_known_builder.get_well_known() if not r: - request.setResponseCode(404) - request.setHeader(b"Content-Type", b"text/plain") - return b".well-known not available" + raise NotFoundError(".well-known not available") logger.debug("returning: %s", r) - request.setHeader(b"Content-Type", b"application/json") - return json_encoder.encode(r).encode("utf-8") + return 200, r class ServerWellKnownResource(Resource): diff --git a/tests/rest/client/test_auth_issuer.py 
b/tests/rest/client/test_auth_issuer.py index 964baeec32..299475a35c 100644 --- a/tests/rest/client/test_auth_issuer.py +++ b/tests/rest/client/test_auth_issuer.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from http import HTTPStatus +from unittest.mock import AsyncMock from synapse.rest.client import auth_issuer @@ -50,10 +51,27 @@ class AuthIssuerTestCase(HomeserverTestCase): } ) def test_returns_issuer_when_oidc_enabled(self) -> None: - # Make an unauthenticated request for the discovery info. + # Patch the HTTP client to return the issuer metadata + req_mock = AsyncMock(return_value={"issuer": ISSUER}) + self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] + channel = self.make_request( "GET", "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer", ) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(channel.json_body, {"issuer": ISSUER}) + + req_mock.assert_called_with("https://account.example.com/.well-known/openid-configuration") + req_mock.reset_mock() + + # Second call it should use the cached value + channel = self.make_request( + "GET", + "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer", + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.json_body, {"issuer": ISSUER}) + req_mock.assert_not_called() diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index e166c13bc1..ac992766e8 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -17,6 +17,8 @@ # [This file includes modifications made by New Vector Limited] # # +from unittest.mock import AsyncMock + from twisted.web.resource import Resource from synapse.rest.well_known import well_known_resource @@ -112,7 +114,6 @@ class WellKnownTests(unittest.HomeserverTestCase): "msc3861": { "enabled": True, "issuer": "https://issuer", - "account_management_url": "https://my-account.issuer", "client_id": "id", "client_auth_method": "client_secret_post", "client_secret": "secret", @@ -122,18 +123,26 @@ class WellKnownTests(unittest.HomeserverTestCase): } ) def test_client_well_known_msc3861_oauth_delegation(self) -> None: - channel = self.make_request( - "GET", "/.well-known/matrix/client", shorthand=False - ) + # Patch the HTTP client to return the issuer metadata + req_mock = AsyncMock(return_value={"issuer": "https://issuer", "account_management_uri": "https://my-account.issuer"}) + self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] - self.assertEqual(channel.code, 200) - self.assertEqual( - channel.json_body, - { - "m.homeserver": {"base_url": "https://homeserver/"}, - "org.matrix.msc2965.authentication": { - "issuer": "https://issuer", - "account": "https://my-account.issuer", + for _ in range(2): + channel = self.make_request( + "GET", "/.well-known/matrix/client", shorthand=False + ) + + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, + { + "m.homeserver": {"base_url": "https://homeserver/"}, + "org.matrix.msc2965.authentication": { + "issuer": "https://issuer", + "account": "https://my-account.issuer", + }, }, - }, - ) + ) + + # It should have been called exactly once, because it gets cached + req_mock.assert_called_once_with("https://issuer/.well-known/openid-configuration") From 26c1330764911262a6ee16069b17ba27d33871a9 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 30 Aug 2024 10:07:46 +0200 Subject: [PATCH 183/332] Replace isort and black with ruff 
(#17620) Ruff now has decent parity with black and isort, so this is going to just save us a bunch of time --- .github/workflows/fix_lint.yaml | 14 +- .github/workflows/tests.yml | 11 +- changelog.d/17620.misc | 1 + docs/code_style.md | 4 +- poetry.lock | 126 ++-------- pylint.cfg | 280 --------------------- pyproject.toml | 38 ++- scripts-dev/lint.sh | 9 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/storage/_base.py | 7 +- synmark/__init__.py | 4 +- 11 files changed, 56 insertions(+), 440 deletions(-) create mode 100644 changelog.d/17620.misc delete mode 100644 pylint.cfg diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index f1e35fcd99..5970b4e826 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -29,17 +29,9 @@ jobs: with: install-project: "false" - - name: Import order (isort) + - name: Run ruff continue-on-error: true - run: poetry run isort . - - - name: Code style (black) - continue-on-error: true - run: poetry run black . - - - name: Semantic checks (ruff) - continue-on-error: true - run: poetry run ruff --fix . + run: poetry run ruff check --fix . - run: cargo clippy --all-features --fix -- -D warnings continue-on-error: true @@ -49,4 +41,4 @@ jobs: - uses: stefanzweifel/git-auto-commit-action@v5 with: - commit_message: "Attempt to fix linting" + commit_message: "Attempt to fix linting" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 730f8552d9..add046ec6a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -131,15 +131,8 @@ jobs: with: install-project: "false" - - name: Import order (isort) - run: poetry run isort --check --diff . - - - name: Code style (black) - run: poetry run black --check --diff . - - - name: Semantic checks (ruff) - # --quiet suppresses the update check. - run: poetry run ruff check --quiet . + - name: Check style + run: poetry run ruff check --output-format=github . lint-mypy: runs-on: ubuntu-latest diff --git a/changelog.d/17620.misc b/changelog.d/17620.misc new file mode 100644 index 0000000000..f583cdcb38 --- /dev/null +++ b/changelog.d/17620.misc @@ -0,0 +1 @@ +Replace `isort` and `black with `ruff`. diff --git a/docs/code_style.md b/docs/code_style.md index 026001b8a3..c28aaadad0 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -8,9 +8,7 @@ errors in code. The necessary tools are: -- [black](https://black.readthedocs.io/en/stable/), a source code formatter; -- [isort](https://pycqa.github.io/isort/), which organises each file's imports; -- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and +- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors and enforce a consistent style; and - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker. See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions diff --git a/poetry.lock b/poetry.lock index 0fffa8e9ba..731adff9d4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -105,52 +105,6 @@ files = [ tests = ["pytest (>=3.2.1,!=3.3.0)"] typecheck = ["mypy"] -[[package]] -name = "black" -version = "24.8.0" -description = "The uncompromising code formatter." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, - {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, - {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, - {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, - {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, - {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, - {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, - {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, - {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, - {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, - {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, - {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, - {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, - {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, - {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, - {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, - {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, - {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, - {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, - {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, - {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, - {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} 
-typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - [[package]] name = "bleach" version = "6.1.0" @@ -832,20 +786,6 @@ tomli = {version = "*", markers = "python_version < \"3.11\""} [package.extras] scripts = ["click (>=6.0)"] -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - [[package]] name = "jaeger-client" version = "4.8.0" @@ -1494,17 +1434,6 @@ files = [ [package.extras] dev = ["jinja2"] -[[package]] -name = "pathspec" -version = "0.11.1" -description = "Utility library for gitignore style pattern matching of file paths." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, -] - [[package]] name = "phonenumbers" version = "8.13.44" @@ -1638,21 +1567,6 @@ files = [ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, ] -[[package]] -name = "platformdirs" -version = "3.1.1" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, - {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - [[package]] name = "prometheus-client" version = "0.20.0" @@ -2354,29 +2268,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.5" +version = "0.6.2" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"}, - {file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"}, - {file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"}, - {file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"}, - {file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"}, - {file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"}, - {file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"}, + {file = "ruff-0.6.2-py3-none-linux_armv6l.whl", hash = "sha256:5c8cbc6252deb3ea840ad6a20b0f8583caab0c5ef4f9cca21adc5a92b8f79f3c"}, + {file = "ruff-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:17002fe241e76544448a8e1e6118abecbe8cd10cf68fde635dad480dba594570"}, + {file = "ruff-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3dbeac76ed13456f8158b8f4fe087bf87882e645c8e8b606dd17b0b66c2c1158"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:094600ee88cda325988d3f54e3588c46de5c18dae09d683ace278b11f9d4d534"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:316d418fe258c036ba05fbf7dfc1f7d3d4096db63431546163b472285668132b"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d72b8b3abf8a2d51b7b9944a41307d2f442558ccb3859bbd87e6ae9be1694a5d"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2aed7e243be68487aa8982e91c6e260982d00da3f38955873aecd5a9204b1d66"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d371f7fc9cec83497fe7cf5eaf5b76e22a8efce463de5f775a1826197feb9df8"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f310d63af08f583363dfb844ba8f9417b558199c58a5999215082036d795a1"}, + {file = "ruff-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db6880c53c56addb8638fe444818183385ec85eeada1d48fc5abe045301b2f1"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1175d39faadd9a50718f478d23bfc1d4da5743f1ab56af81a2b6caf0a2394f23"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939f9c86d51635fe486585389f54582f0d65b8238e08c327c1534844b3bb9a"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0d62ca91219f906caf9b187dea50d17353f15ec9bb15aae4a606cd697b49b4c"}, + {file = "ruff-0.6.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7438a7288f9d67ed3c8ce4d059e67f7ed65e9fe3aa2ab6f5b4b3610e57e3cb56"}, + {file = "ruff-0.6.2-py3-none-win32.whl", hash = "sha256:279d5f7d86696df5f9549b56b9b6a7f6c72961b619022b5b7999b15db392a4da"}, + {file = "ruff-0.6.2-py3-none-win_amd64.whl", hash = "sha256:d9f3469c7dd43cd22eb1c3fc16926fb8258d50cb1b216658a07be95dd117b0f2"}, + {file = "ruff-0.6.2-py3-none-win_arm64.whl", hash = "sha256:f28fcd2cd0e02bdf739297516d5643a945cc7caf09bd9bcb4d932540a5ea4fa9"}, + {file = "ruff-0.6.2.tar.gz", hash = "sha256:239ee6beb9e91feb8e0ec384204a763f36cb53fb895a1a364618c6abb076b3be"}, ] [[package]] @@ -3190,4 +3104,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93" +content-hash = "2bf09e2b68f3abd1a0f9ff2227eb3026ac3d034845acfc120d0b1cb8167ea43b" diff --git a/pylint.cfg b/pylint.cfg deleted file mode 100644 index 2368997112..0000000000 --- a/pylint.cfg +++ /dev/null @@ -1,280 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Profiled execution. -profile=no - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - - -[MESSAGES CONTROL] - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". 
If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=missing-docstring - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (RP0004). -comment=no - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -zope=no - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. Python regular -# expressions are accepted. -generated-members=REQUEST,acl_users,aq_parent - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the beginning of the name of dummy variables -# (i.e. not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. 
-additional-builtins= - - -[BASIC] - -# Required attributes for module, separated by a comma -required-attributes= - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct attribute names in class -# bodies -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=__.*__ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. 
internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[CLASSES] - -# List of interface methods to ignore, separated by a comma. This is used for -# instance to not check methods defines in Zope's Interface base class. -ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/pyproject.toml b/pyproject.toml index b31eca75ec..058c35c829 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,14 +34,9 @@ name = "Internal Changes" showcontent = true -[tool.black] -target-version = ['py38', 'py39', 'py310', 'py311'] -# black ignores everything in .gitignore by default, see -# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore -# Use `extend-exclude` if you want to exclude something in addition to this. - [tool.ruff] line-length = 88 +target-version = "py38" [tool.ruff.lint] # See https://beta.ruff.rs/docs/rules/#error-e @@ -63,6 +58,8 @@ select = [ "W", # pyflakes "F", + # isort + "I001", # flake8-bugbear "B0", # flake8-comprehensions @@ -79,17 +76,20 @@ select = [ "EXE", ] -[tool.isort] -line_length = 88 -sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"] -default_section = "THIRDPARTY" -known_first_party = ["synapse"] -known_tests = ["tests"] -known_twisted = ["twisted", "OpenSSL"] -multi_line_output = 3 -include_trailing_comma = true -combine_as_imports = true -skip_gitignore = true +[tool.ruff.lint.isort] +combine-as-imports = true +section-order = ["future", "standard-library", "third-party", "twisted", "first-party", "testing", "local-folder"] +known-first-party = ["synapse"] + +[tool.ruff.lint.isort.sections] +twisted = ["twisted", "OpenSSL"] +testing = ["tests"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" [tool.maturin] manifest-path = "rust/Cargo.toml" @@ -320,9 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-isort = ">=5.10.1" -black = ">=22.7.0" -ruff = "0.5.5" +ruff = "0.6.2" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 8acf0a6fb8..fa6ff90708 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -1,8 +1,9 @@ #!/usr/bin/env bash # # Runs linting scripts over the local Synapse checkout -# black - opinionated code formatter # ruff - lints and finds mistakes +# mypy - typechecks python code +# cargo clippy - lints rust code set -e @@ -101,12 +102,6 @@ echo # Print out the commands being run set -x -# Ensure the sort order of imports. -isort "${files[@]}" - -# Ensure Python code conforms to an opinionated style. -python3 -m black "${files[@]}" - # Ensure the sample configuration file conforms to style checks. ./scripts-dev/config-lint.sh diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index a411ed614e..1975ebb477 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -28,7 +28,7 @@ from synapse._pydantic_compat import HAS_PYDANTIC_V2 if TYPE_CHECKING or HAS_PYDANTIC_V2: from pydantic.v1 import Extra, StrictInt, StrictStr else: - from pydantic import StrictInt, StrictStr, Extra + from pydantic import Extra, StrictInt, StrictStr from signedjson.sign import sign_json diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index e12ab94576..1ac85ad66d 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -23,8 +23,11 @@ import logging from abc import ABCMeta from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union -from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401 -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + make_in_list_sql_clause, # noqa: F401 +) from synapse.types import get_domain_from_id from synapse.util import json_decoder from synapse.util.caches.descriptors import CachedFunction diff --git a/synmark/__init__.py b/synmark/__init__.py index 8c47e50c7c..887fec2f96 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -27,7 +27,9 @@ from synapse.types import ISynapseReactor try: from twisted.internet.epollreactor import EPollReactor as Reactor except ImportError: - from twisted.internet.pollreactor import PollReactor as Reactor # type: ignore[assignment] + from twisted.internet.pollreactor import ( # type: ignore[assignment] + PollReactor as Reactor, + ) from twisted.internet.main import installReactor From 7b75922020f308a30f0d93c56bd37d0560aba8bd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 15:35:18 +0100 Subject: [PATCH 184/332] 1.114.0rc2 --- CHANGES.md | 42 +++++++++++++++++++++++++++++++++++++++ changelog.d/17194.bugfix | 1 - changelog.d/17407.misc | 1 - changelog.d/17509.feature | 1 - changelog.d/17532.bugfix | 1 - changelog.d/17543.bugfix | 1 - changelog.d/17595.misc | 1 - changelog.d/17599.misc | 1 - changelog.d/17600.misc | 1 - changelog.d/17604.misc | 1 - changelog.d/17606.misc | 1 - changelog.d/17608.feature | 1 - changelog.d/17617.misc | 1 - changelog.d/17620.misc | 1 - changelog.d/17622.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 17 files changed, 49 insertions(+), 15 deletions(-) delete mode 100644 changelog.d/17194.bugfix delete mode 100644 changelog.d/17407.misc delete mode 100644 changelog.d/17509.feature 
 delete mode 100644 changelog.d/17532.bugfix
 delete mode 100644 changelog.d/17543.bugfix
 delete mode 100644 changelog.d/17595.misc
 delete mode 100644 changelog.d/17599.misc
 delete mode 100644 changelog.d/17600.misc
 delete mode 100644 changelog.d/17604.misc
 delete mode 100644 changelog.d/17606.misc
 delete mode 100644 changelog.d/17608.feature
 delete mode 100644 changelog.d/17617.misc
 delete mode 100644 changelog.d/17620.misc
 delete mode 100644 changelog.d/17622.misc

diff --git a/CHANGES.md b/CHANGES.md
index 0a57cfb906..16995224e7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,45 @@
+# Synapse 1.114.0rc2 (2024-08-30)
+
+### Features
+
+- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509))
+- Make `hash_password` accept password input from stdin. ([\#17608](https://github.com/element-hq/synapse/issues/17608))
+
+### Bugfixes
+
+- Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). ([\#17194](https://github.com/element-hq/synapse/issues/17194))
+- Fix content-length on federation /thumbnail responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532))
+- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17543](https://github.com/element-hq/synapse/issues/17543))
+
+### Internal Changes
+
+- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407))
+- Refactor sliding sync class into multiple files. ([\#17595](https://github.com/element-hq/synapse/issues/17595))
+- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599))
+- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600))
+- Add support to `@tag_args` for standalone functions. ([\#17604](https://github.com/element-hq/synapse/issues/17604))
+- Speed up incremental syncs in sliding sync by adding some more caching. ([\#17606](https://github.com/element-hq/synapse/issues/17606))
+- Always return the user's own read receipts in sliding sync. ([\#17617](https://github.com/element-hq/synapse/issues/17617))
+- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620))
+- Refactor sliding sync code to move room list logic out into a separate class. ([\#17622](https://github.com/element-hq/synapse/issues/17622))
+
+
+
+### Updates to locked dependencies
+
+* Bump attrs from 23.2.0 to 24.2.0. ([\#17609](https://github.com/element-hq/synapse/issues/17609))
+* Bump cryptography from 42.0.8 to 43.0.0. ([\#17584](https://github.com/element-hq/synapse/issues/17584))
+* Bump phonenumbers from 8.13.43 to 8.13.44. ([\#17610](https://github.com/element-hq/synapse/issues/17610))
+* Bump pygithub from 2.3.0 to 2.4.0. ([\#17612](https://github.com/element-hq/synapse/issues/17612))
+* Bump pyyaml from 6.0.1 to 6.0.2. ([\#17611](https://github.com/element-hq/synapse/issues/17611))
+* Bump sentry-sdk from 2.12.0 to 2.13.0. ([\#17585](https://github.com/element-hq/synapse/issues/17585))
+* Bump serde from 1.0.206 to 1.0.208. ([\#17581](https://github.com/element-hq/synapse/issues/17581))
+* Bump serde from 1.0.208 to 1.0.209.
([\#17613](https://github.com/element-hq/synapse/issues/17613)) +* Bump serde_json from 1.0.124 to 1.0.125. ([\#17582](https://github.com/element-hq/synapse/issues/17582)) +* Bump serde_json from 1.0.125 to 1.0.127. ([\#17614](https://github.com/element-hq/synapse/issues/17614)) +* Bump types-jsonschema from 4.23.0.20240712 to 4.23.0.20240813. ([\#17583](https://github.com/element-hq/synapse/issues/17583)) +* Bump types-setuptools from 71.1.0.20240726 to 71.1.0.20240818. ([\#17586](https://github.com/element-hq/synapse/issues/17586)) + # Synapse 1.114.0rc1 (2024-08-20) ### Features diff --git a/changelog.d/17194.bugfix b/changelog.d/17194.bugfix deleted file mode 100644 index 29ac56ceac..0000000000 --- a/changelog.d/17194.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). diff --git a/changelog.d/17407.misc b/changelog.d/17407.misc deleted file mode 100644 index 9ed6e61a5b..0000000000 --- a/changelog.d/17407.misc +++ /dev/null @@ -1 +0,0 @@ -MSC3861: load the issuer and account management URLs from OIDC discovery. diff --git a/changelog.d/17509.feature b/changelog.d/17509.feature deleted file mode 100644 index 6d639ceb98..0000000000 --- a/changelog.d/17509.feature +++ /dev/null @@ -1 +0,0 @@ -Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. diff --git a/changelog.d/17532.bugfix b/changelog.d/17532.bugfix deleted file mode 100644 index 5b05f0f9ba..0000000000 --- a/changelog.d/17532.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix content-length on federation /thumbnail responses. diff --git a/changelog.d/17543.bugfix b/changelog.d/17543.bugfix deleted file mode 100644 index 152b305e58..0000000000 --- a/changelog.d/17543.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix authenticated media responses using a wrong limit when following redirects over federation. \ No newline at end of file diff --git a/changelog.d/17595.misc b/changelog.d/17595.misc deleted file mode 100644 index c8e040d87c..0000000000 --- a/changelog.d/17595.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor sliding sync class into multiple files. diff --git a/changelog.d/17599.misc b/changelog.d/17599.misc deleted file mode 100644 index 2f81356d12..0000000000 --- a/changelog.d/17599.misc +++ /dev/null @@ -1 +0,0 @@ -Store sliding sync per-connection state in the database. diff --git a/changelog.d/17600.misc b/changelog.d/17600.misc deleted file mode 100644 index a81c67f6d1..0000000000 --- a/changelog.d/17600.misc +++ /dev/null @@ -1 +0,0 @@ -Make the sliding sync `PerConnectionState` class immutable. diff --git a/changelog.d/17604.misc b/changelog.d/17604.misc deleted file mode 100644 index 96cb213bbd..0000000000 --- a/changelog.d/17604.misc +++ /dev/null @@ -1 +0,0 @@ -Add support to `@tag_args` for standalone functions. diff --git a/changelog.d/17606.misc b/changelog.d/17606.misc deleted file mode 100644 index 47634b1305..0000000000 --- a/changelog.d/17606.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up incremental syncs in sliding sync by adding some more caching. diff --git a/changelog.d/17608.feature b/changelog.d/17608.feature deleted file mode 100644 index adf9ac5533..0000000000 --- a/changelog.d/17608.feature +++ /dev/null @@ -1 +0,0 @@ -Make `hash_password` accept password input from stdin. 
\ No newline at end of file diff --git a/changelog.d/17617.misc b/changelog.d/17617.misc deleted file mode 100644 index ba05648965..0000000000 --- a/changelog.d/17617.misc +++ /dev/null @@ -1 +0,0 @@ -Always return the user's own read receipts in sliding sync. diff --git a/changelog.d/17620.misc b/changelog.d/17620.misc deleted file mode 100644 index f583cdcb38..0000000000 --- a/changelog.d/17620.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `isort` and `black with `ruff`. diff --git a/changelog.d/17622.misc b/changelog.d/17622.misc deleted file mode 100644 index af064f7e13..0000000000 --- a/changelog.d/17622.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor sliding sync code to move room list logic out into a separate class. diff --git a/debian/changelog b/debian/changelog index f32dcc0450..b09a914d84 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.114.0~rc2) stable; urgency=medium + + * New Synapse release 1.114.0rc2. + + -- Synapse Packaging team Fri, 30 Aug 2024 15:35:13 +0100 + matrix-synapse-py3 (1.114.0~rc1) stable; urgency=medium * New synapse release 1.114.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 058c35c829..bbe5eb791b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.114.0rc1" +version = "1.114.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 1caff7552654337059b783e73ff8d917ba129c74 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 15:36:52 +0100 Subject: [PATCH 185/332] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 16995224e7..24c5c72206 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ ### Bugfixes - Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). ([\#17194](https://github.com/element-hq/synapse/issues/17194)) -- Fix content-length on federation /thumbnail responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532)) +- Fix content-length on federation `/thumbnail` responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532)) - Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17543](https://github.com/element-hq/synapse/issues/17543)) ### Internal Changes From a5a454fc35d41f0631ecbe357d3ba76c65bac36e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 15:39:53 +0100 Subject: [PATCH 186/332] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 24c5c72206..f160105c95 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ ### Features - Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509)) -- Make `hash_password` accept password input from stdin. ([\#17608](https://github.com/element-hq/synapse/issues/17608)) +- Make `hash_password` script accept password input from stdin. 
([\#17608](https://github.com/element-hq/synapse/issues/17608)) ### Bugfixes From da58e55a0bd4169cc14c3b55deec3b84ce23efa3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 29 Aug 2024 13:26:17 +0100 Subject: [PATCH 187/332] Fix starting non-media repos (#17626) Regressed in #17543. The `max_download_size` config is not available on workers that don't load the media repo. Besides, we should honour the max_size param that was passed into the function. --- changelog.d/17626.bugfix | 1 + synapse/http/matrixfederationclient.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17626.bugfix diff --git a/changelog.d/17626.bugfix b/changelog.d/17626.bugfix new file mode 100644 index 0000000000..1dbb2a2f45 --- /dev/null +++ b/changelog.d/17626.bugfix @@ -0,0 +1 @@ +Fix authenticated media responses using a wrong limit when following redirects over federation. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 12c41c39e9..ecbbb6cfc4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -464,8 +464,6 @@ class MatrixFederationHttpClient: self.max_long_retries = hs.config.federation.max_long_retries self.max_short_retries = hs.config.federation.max_short_retries - self.max_download_size = hs.config.media.max_upload_size - self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) self._sleeper = AwakenableSleeper(self.reactor) @@ -1759,9 +1757,9 @@ class MatrixFederationHttpClient: str_url, ) # We don't know how large the response will be upfront, so limit it to - # the `max_upload_size` config value. + # the `max_size` config value. length, headers, _, _ = await self._simple_http_client.get_file( - str_url, output_stream, self.max_download_size + str_url, output_stream, max_size ) logger.info( From d6125c583d774cae88af189ec1e98256fdaec92f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 16:38:08 +0100 Subject: [PATCH 188/332] 1.114.0rc3 --- CHANGES.md | 9 +++++++++ changelog.d/17626.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17626.bugfix diff --git a/CHANGES.md b/CHANGES.md index f160105c95..24253486fa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.114.0rc3 (2024-08-30) + +### Bugfixes + +- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) + + + + # Synapse 1.114.0rc2 (2024-08-30) ### Features diff --git a/changelog.d/17626.bugfix b/changelog.d/17626.bugfix deleted file mode 100644 index 1dbb2a2f45..0000000000 --- a/changelog.d/17626.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix authenticated media responses using a wrong limit when following redirects over federation. diff --git a/debian/changelog b/debian/changelog index b09a914d84..32a2332c4f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.114.0~rc3) stable; urgency=medium + + * New Synapse release 1.114.0rc3. + + -- Synapse Packaging team Fri, 30 Aug 2024 16:38:05 +0100 + matrix-synapse-py3 (1.114.0~rc2) stable; urgency=medium * New Synapse release 1.114.0rc2. 
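For context on the rc3 fix above (#17626, patch 187): the regression came from reading a media-repo-only config value in the federation client's constructor, an attribute that simply does not exist on workers that never load the media repository. The sketch below is illustrative only — the class and method names are stand-ins, not Synapse's real `MatrixFederationHttpClient` API — and shows the corrected shape: no media config read at construction time, and the caller-supplied `max_size` honoured per request.

```python
from typing import BinaryIO, Dict, Protocol, Tuple


class HttpClientLike(Protocol):
    """Stand-in for the plain HTTP client used to follow media redirects."""

    async def get_file(
        self, url: str, output_stream: BinaryIO, max_size: int
    ) -> Tuple[int, Dict[str, str]]: ...


class FederationClientSketch:
    def __init__(self, http_client: HttpClientLike) -> None:
        # Deliberately no `hs.config.media.*` read here: that config section
        # is absent on workers that don't run the media repository, which is
        # exactly what made such workers fail to start.
        self._http = http_client

    async def follow_media_redirect(
        self, url: str, output_stream: BinaryIO, max_size: int
    ) -> int:
        # Honour the per-request limit the caller already computed, rather
        # than substituting a process-wide `max_upload_size`.
        length, _headers = await self._http.get_file(url, output_stream, max_size)
        return length
```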
diff --git a/pyproject.toml b/pyproject.toml index bbe5eb791b..bc88ca9c60 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.114.0rc2" +version = "1.114.0rc3" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 966a50bb630290b079dfb56cd0e032f7492211f5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 30 Aug 2024 16:38:53 +0100 Subject: [PATCH 189/332] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 24253486fa..a8d3a917e2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,7 +2,7 @@ ### Bugfixes -- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) +- Fix regression in v1.114.0rc2 that caused workers to fail to start. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) From d52c17ce01e56cd6fa669b1d73ff74e7d9c8d1bd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 1 Sep 2024 10:18:45 +0100 Subject: [PATCH 190/332] Sliding sync: various fixes to background update (#17636) Follows on from #17512, other fixes include: #17633, #17634, #17635 --- changelog.d/17636.misc | 1 + synapse/storage/databases/main/events.py | 2 +- .../databases/main/events_bg_updates.py | 46 ++++++++++++++----- .../storage/databases/main/events_worker.py | 26 +++++++++-- 4 files changed, 59 insertions(+), 16 deletions(-) create mode 100644 changelog.d/17636.misc diff --git a/changelog.d/17636.misc b/changelog.d/17636.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17636.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index f3dbe5bba7..e44b8d8e54 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1861,7 +1861,7 @@ class PersistEventsStore: VALUES ( ?, ?, ?, ?, ?, (SELECT stream_ordering FROM events WHERE event_id = ?), - (SELECT instance_name FROM events WHERE event_id = ?) + (SELECT COALESCE(instance_name, 'master') FROM events WHERE event_id = ?) {("," + ", ".join("?" 
for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""} ) ON CONFLICT (room_id, user_id) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index cb23f433bc..e819364a16 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -41,6 +41,7 @@ from synapse.storage.databases.main.events import ( SlidingSyncMembershipSnapshotSharedInsertValues, SlidingSyncStateInsertValues, ) +from synapse.storage.databases.main.events_worker import DatabaseCorruptionError from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.types import Cursor @@ -1857,6 +1858,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS initial_phase = True last_room_id = progress.get("last_room_id", "") + last_user_id = progress.get("last_user_id", "") last_event_stream_ordering = progress["last_event_stream_ordering"] def _find_memberships_to_update_txn( @@ -1887,11 +1889,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS FROM local_current_membership AS c INNER JOIN events AS e USING (event_id) LEFT JOIN rooms AS r ON (c.room_id = r.room_id) - WHERE c.room_id > ? - ORDER BY c.room_id ASC + WHERE (c.room_id, c.user_id) > (?, ?) + ORDER BY c.room_id ASC, c.user_id ASC LIMIT ? """, - (last_room_id, batch_size), + (last_room_id, last_user_id, batch_size), ) elif last_event_stream_ordering is not None: # It's important to sort by `event_stream_ordering` *ascending* (oldest to @@ -1993,6 +1995,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS WHERE room_id = ? AND m.user_id = ? + AND (m.membership = ? OR m.membership = ?) AND e.event_id != ? ORDER BY e.topological_ordering DESC LIMIT 1 @@ -2000,6 +2003,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ( room_id, user_id, + Membership.INVITE, + Membership.KNOCK, event_id, ), ) @@ -2081,9 +2086,17 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # have `current_state_events` and we should have some current state # for each room if current_state_ids_map: - fetched_events = await self.get_events( - current_state_ids_map.values() - ) + try: + fetched_events = await self.get_events( + current_state_ids_map.values() + ) + except DatabaseCorruptionError as e: + logger.warning( + "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", + room_id, + e, + ) + continue current_state_map: StateMap[EventBase] = { state_key: fetched_events[event_id] @@ -2124,7 +2137,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS False ) elif membership in (Membership.INVITE, Membership.KNOCK) or ( - membership == Membership.LEAVE and is_outlier + membership in (Membership.LEAVE, Membership.BAN) and is_outlier ): invite_or_knock_event_id = membership_event_id invite_or_knock_membership = membership @@ -2135,7 +2148,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # us a consistent view of the room state regardless of your # membership (i.e. the room shouldn't disappear if your using the # `is_encrypted` filter and you leave). 
- if membership == Membership.LEAVE and is_outlier: + if membership in (Membership.LEAVE, Membership.BAN) and is_outlier: invite_or_knock_event_id, invite_or_knock_membership = ( await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update._find_previous_membership", @@ -2182,7 +2195,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS await_full_state=False, ) - fetched_events = await self.get_events(state_ids_map.values()) + try: + fetched_events = await self.get_events(state_ids_map.values()) + except DatabaseCorruptionError as e: + logger.warning( + "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", + room_id, + e, + ) + continue state_map: StateMap[EventBase] = { state_key: fetched_events[event_id] @@ -2296,7 +2317,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ( room_id, _room_id_from_rooms_table, - _user_id, + user_id, _sender, _membership_event_id, _membership, @@ -2308,8 +2329,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS progress = { "initial_phase": initial_phase, "last_room_id": room_id, - "last_event_stream_ordering": membership_event_stream_ordering, + "last_user_id": user_id, + "last_event_stream_ordering": last_event_stream_ordering, } + if not initial_phase: + progress["last_event_stream_ordering"] = membership_event_stream_ordering await self.db_pool.updates._background_update_progress( _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 6079cc4a52..1d83390827 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -98,6 +98,26 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class DatabaseCorruptionError(RuntimeError): + """We found an event in the DB that has a persisted event ID that doesn't + match its computed event ID.""" + + def __init__( + self, room_id: str, persisted_event_id: str, computed_event_id: str + ) -> None: + self.room_id = room_id + self.persisted_event_id = persisted_event_id + self.computed_event_id = computed_event_id + + message = ( + f"Database corruption: Event {persisted_event_id} in room {room_id} " + f"from the database appears to have been modified (calculated " + f"event id {computed_event_id})" + ) + + super().__init__(message) + + # These values are used in the `enqueue_event` and `_fetch_loop` methods to # control how we batch/bulk fetch events from the database. # The values are plucked out of thing air to make initial sync run faster @@ -1364,10 +1384,8 @@ class EventsWorkerStore(SQLBaseStore): if original_ev.event_id != event_id: # it's difficult to see what to do here. Pretty much all bets are off # if Synapse cannot rely on the consistency of its database. 
- raise RuntimeError( - f"Database corruption: Event {event_id} in room {d['room_id']} " - f"from the database appears to have been modified (calculated " - f"event id {original_ev.event_id})" + raise DatabaseCorruptionError( + d["room_id"], event_id, original_ev.event_id ) event_map[event_id] = original_ev From b4d0356e48857aabcc2d51077c6e3947ab63ae36 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 1 Sep 2024 10:42:49 +0100 Subject: [PATCH 191/332] Also handle invalid event errors --- synapse/storage/databases/main/events_bg_updates.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index e819364a16..ee38b3738b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -41,7 +41,10 @@ from synapse.storage.databases.main.events import ( SlidingSyncMembershipSnapshotSharedInsertValues, SlidingSyncStateInsertValues, ) -from synapse.storage.databases.main.events_worker import DatabaseCorruptionError +from synapse.storage.databases.main.events_worker import ( + DatabaseCorruptionError, + InvalidEventError, +) from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.types import Cursor @@ -2090,7 +2093,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS fetched_events = await self.get_events( current_state_ids_map.values() ) - except DatabaseCorruptionError as e: + except (DatabaseCorruptionError, InvalidEventError) as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, @@ -2197,7 +2200,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS try: fetched_events = await self.get_events(state_ids_map.values()) - except DatabaseCorruptionError as e: + except (DatabaseCorruptionError, InvalidEventError) as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, From 8b6ff1dba563defdd731c4320123ec3a0a2fc7e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 1 Sep 2024 10:43:26 +0100 Subject: [PATCH 192/332] Revert "Also handle invalid event errors" This reverts commit b4d0356e48857aabcc2d51077c6e3947ab63ae36. 
--- synapse/storage/databases/main/events_bg_updates.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index ee38b3738b..e819364a16 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -41,10 +41,7 @@ from synapse.storage.databases.main.events import ( SlidingSyncMembershipSnapshotSharedInsertValues, SlidingSyncStateInsertValues, ) -from synapse.storage.databases.main.events_worker import ( - DatabaseCorruptionError, - InvalidEventError, -) +from synapse.storage.databases.main.events_worker import DatabaseCorruptionError from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.types import Cursor @@ -2093,7 +2090,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS fetched_events = await self.get_events( current_state_ids_map.values() ) - except (DatabaseCorruptionError, InvalidEventError) as e: + except DatabaseCorruptionError as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, @@ -2200,7 +2197,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS try: fetched_events = await self.get_events(state_ids_map.values()) - except (DatabaseCorruptionError, InvalidEventError) as e: + except DatabaseCorruptionError as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, From 560b43ac02ecb7537683b1523ed6725b1bc10bf5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 1 Sep 2024 10:52:03 +0100 Subject: [PATCH 193/332] Sliding Sync: Split up `get_room_membership_for_user_at_to_token` (#17629) This is to make it easier to reuse the logic when adding support for the new tables --------- Co-authored-by: Eric Eastwood --- changelog.d/17629.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 413 ++++++++++++-------- 2 files changed, 242 insertions(+), 172 deletions(-) create mode 100644 changelog.d/17629.misc diff --git a/changelog.d/17629.misc b/changelog.d/17629.misc new file mode 100644 index 0000000000..1eb46b2c68 --- /dev/null +++ b/changelog.d/17629.misc @@ -0,0 +1 @@ +Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. 
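Before the `room_lists.py` diff below, it may help to see the snapshot-token assembly that the extracted `_get_rewind_changes_to_current_membership_to_token` performs, in isolation. This is a sketch only: plain ints, tuples and dicts stand in for Synapse's `PersistedEventPosition` and `RoomStreamToken` types, and it assumes at least one membership (the real method early-returns when the user has no rooms).

```python
from typing import Dict, List, Tuple


def snapshot_token(positions: List[Tuple[str, int]]) -> Tuple[int, Dict[str, int]]:
    """positions: (instance_name, stream_ordering) pairs taken from the user's
    room memberships; returns (stream, instance_map) in the shape a
    RoomStreamToken uses."""
    # Max position we read from each event-persister instance.
    per_instance: Dict[str, int] = {}
    for instance_name, stream_ordering in positions:
        if stream_ordering > per_instance.get(instance_name, -1):
            per_instance[instance_name] = stream_ordering

    # The token's stream field is the minimum of those maxima; only writers
    # that are ahead of it need an explicit entry in the instance map.
    min_stream_pos = min(per_instance.values())
    instance_map = {
        name: pos for name, pos in per_instance.items() if pos > min_stream_pos
    }
    return min_stream_pos, instance_map


# For example:
#   snapshot_token([("master", 10), ("worker1", 15), ("master", 12)])
#   -> (12, {"worker1": 15})
```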
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
index 0e6cb28524..f41180feda 100644
--- a/synapse/handlers/sliding_sync/room_lists.py
+++ b/synapse/handlers/sliding_sync/room_lists.py
@@ -25,6 +25,7 @@ from typing import (
     Mapping,
     Optional,
     Set,
+    Tuple,
     Union,
 )
 
@@ -46,6 +47,7 @@ from synapse.storage.databases.main.state import (
     Sentinel as StateSentinel,
 )
 from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
+from synapse.storage.roommember import RoomsForUser
 from synapse.types import (
     MutableStateMap,
     PersistedEventPosition,
@@ -424,6 +426,156 @@ class SlidingSyncRoomLists:
             room_membership_for_user_map=room_membership_for_user_map,
         )
 
+    @trace
+    async def _get_rewind_changes_to_current_membership_to_token(
+        self,
+        user: UserID,
+        rooms_for_user: Mapping[str, RoomsForUser],
+        to_token: StreamToken,
+    ) -> Mapping[str, Optional[RoomsForUser]]:
+        """
+        Takes the current set of rooms for a user (retrieved after the given
+        token), and returns the changes needed to "rewind" it to match the set of
+        memberships *at that token* (<= `to_token`).
+
+        Args:
+            user: User to fetch rooms for
+            rooms_for_user: The set of rooms for the user after the `to_token`.
+            to_token: The token to rewind to
+
+        Returns:
+            The changes to apply to rewind the current memberships.
+        """
+        # If the user has never joined any rooms before, we can just return an empty list
+        if not rooms_for_user:
+            return {}
+
+        user_id = user.to_string()
+
+        # Get the `RoomStreamToken` that represents the spot we queried up to when we got
+        # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
+        #
+        # First, we need to get the max stream_ordering of each event persister instance
+        # that we queried events from.
+        instance_to_max_stream_ordering_map: Dict[str, int] = {}
+        for room_for_user in rooms_for_user.values():
+            instance_name = room_for_user.event_pos.instance_name
+            stream_ordering = room_for_user.event_pos.stream
+
+            current_instance_max_stream_ordering = (
+                instance_to_max_stream_ordering_map.get(instance_name)
+            )
+            if (
+                current_instance_max_stream_ordering is None
+                or stream_ordering > current_instance_max_stream_ordering
+            ):
+                instance_to_max_stream_ordering_map[instance_name] = stream_ordering
+
+        # Then assemble the `RoomStreamToken`
+        min_stream_pos = min(instance_to_max_stream_ordering_map.values())
+        membership_snapshot_token = RoomStreamToken(
+            # Minimum position in the `instance_map`
+            stream=min_stream_pos,
+            instance_map=immutabledict(
+                {
+                    instance_name: stream_pos
+                    for instance_name, stream_pos in instance_to_max_stream_ordering_map.items()
+                    if stream_pos > min_stream_pos
+                }
+            ),
+        )
+
+        # Since we fetched the user's room list at some point in time after the
+        # tokens, we need to revert/rewind some membership changes to match the point in
+        # time of the `to_token`. In particular, we need to make these fixups:
+        #
In particular, we need to make these fixups: + # + # - a) Remove rooms that the user joined after the `to_token` + # - b) Update room membership events to the point in time of the `to_token` + + # Fetch membership changes that fall in the range from `to_token` up to + # `membership_snapshot_token` + # + # If our `to_token` is already the same or ahead of the latest room membership + # for the user, we don't need to do any "2)" fix-ups and can just straight-up + # use the room list from the snapshot as a base (nothing has changed) + current_state_delta_membership_changes_after_to_token = [] + if not membership_snapshot_token.is_before_or_eq(to_token.room_key): + current_state_delta_membership_changes_after_to_token = ( + await self.store.get_current_state_delta_membership_changes_for_user( + user_id, + from_key=to_token.room_key, + to_key=membership_snapshot_token, + excluded_room_ids=self.rooms_to_exclude_globally, + ) + ) + + if not current_state_delta_membership_changes_after_to_token: + # There have been no membership changes, so we can early return. + return {} + + # Otherwise we're about to make changes to `rooms_for_user`, so we turn + # it into a mutable dict. + changes: Dict[str, Optional[RoomsForUser]] = {} + + # Assemble a list of the first membership event after the `to_token` so we can + # step backward to the previous membership that would apply to the from/to + # range. + first_membership_change_by_room_id_after_to_token: Dict[ + str, CurrentStateDeltaMembership + ] = {} + for membership_change in current_state_delta_membership_changes_after_to_token: + # Only set if we haven't already set it + first_membership_change_by_room_id_after_to_token.setdefault( + membership_change.room_id, membership_change + ) + + # Since we fetched a snapshot of the users room list at some point in time after + # the from/to tokens, we need to revert/rewind some membership changes to match + # the point in time of the `to_token`. + for ( + room_id, + first_membership_change_after_to_token, + ) in first_membership_change_by_room_id_after_to_token.items(): + # 1a) Remove rooms that the user joined after the `to_token` + if first_membership_change_after_to_token.prev_event_id is None: + changes[room_id] = None + # 1b) 1c) From the first membership event after the `to_token`, step backward to the + # previous membership that would apply to the from/to range. + else: + # We don't expect these fields to be `None` if we have a `prev_event_id` + # but we're being defensive since it's possible that the prev event was + # culled from the database. + if ( + first_membership_change_after_to_token.prev_event_pos is not None + and first_membership_change_after_to_token.prev_membership + is not None + and first_membership_change_after_to_token.prev_sender is not None + ): + # We need to know the room version ID, which we normally we + # can get from the current membership, but if we don't have + # that then we need to query the DB. 
+                    current_membership = rooms_for_user.get(room_id)
+                    if current_membership is not None:
+                        room_version_id = current_membership.room_version_id
+                    else:
+                        room_version_id = await self.store.get_room_version_id(room_id)
+
+                    changes[room_id] = RoomsForUser(
+                        room_id=room_id,
+                        event_id=first_membership_change_after_to_token.prev_event_id,
+                        event_pos=first_membership_change_after_to_token.prev_event_pos,
+                        membership=first_membership_change_after_to_token.prev_membership,
+                        sender=first_membership_change_after_to_token.prev_sender,
+                        room_version_id=room_version_id,
+                    )
+                else:
+                    # If we can't find the previous membership event, we shouldn't
+                    # include the room in the sync response since we can't determine the
+                    # exact membership state and shouldn't rely on the current snapshot.
+                    changes[room_id] = None
+
+        return changes
+
     @trace
     async def get_room_membership_for_user_at_to_token(
         self,
@@ -469,139 +621,89 @@
         if not room_for_user_list:
             return {}
 
+        # Since we fetched the user's room list at some point in time after the
+        # tokens, we need to revert/rewind some membership changes to match the point in
+        # time of the `to_token`.
+        rooms_for_user = {room.room_id: room for room in room_for_user_list}
+        changes = await self._get_rewind_changes_to_current_membership_to_token(
+            user, rooms_for_user, to_token
+        )
+        for room_id, change_room_for_user in changes.items():
+            if change_room_for_user is None:
+                rooms_for_user.pop(room_id, None)
+            else:
+                rooms_for_user[room_id] = change_room_for_user
+
+        newly_joined_room_ids, newly_left_room_ids = (
+            await self._get_newly_joined_and_left_rooms(
+                user_id, to_token=to_token, from_token=from_token
+            )
+        )
+
+        dm_room_ids = await self._get_dm_rooms_for_user(user_id)
+
         # Our working list of rooms that can show up in the sync response
         sync_room_id_set = {
-            # Note: The `room_for_user` we're assigning here will need to be fixed up
-            # (below) because they are potentially from the current snapshot time
-            # instead from the time of the `to_token`.
             room_for_user.room_id: _RoomMembershipForUser(
                 room_id=room_for_user.room_id,
                 event_id=room_for_user.event_id,
                 event_pos=room_for_user.event_pos,
                 membership=room_for_user.membership,
                 sender=room_for_user.sender,
-                # We will update these fields below to be accurate
-                newly_joined=False,
-                newly_left=False,
-                is_dm=False,
+                newly_joined=room_id in newly_joined_room_ids,
+                newly_left=room_id in newly_left_room_ids,
+                is_dm=room_id in dm_room_ids,
             )
-            for room_for_user in room_for_user_list
+            for room_id, room_for_user in rooms_for_user.items()
         }
 
+        # Ensure we have entries for rooms that the user has been "state reset"
+        # out of. These are rooms that appear in the `newly_left_rooms` map but
+        # aren't in the `rooms_for_user` map.
+ for room_id, left_event_pos in newly_left_room_ids.items(): + if room_id in sync_room_id_set: + continue - current_instance_max_stream_ordering = ( - instance_to_max_stream_ordering_map.get(instance_name) - ) - if ( - current_instance_max_stream_ordering is None - or stream_ordering > current_instance_max_stream_ordering - ): - instance_to_max_stream_ordering_map[instance_name] = stream_ordering - - # Then assemble the `RoomStreamToken` - min_stream_pos = min(instance_to_max_stream_ordering_map.values()) - membership_snapshot_token = RoomStreamToken( - # Minimum position in the `instance_map` - stream=min_stream_pos, - instance_map=immutabledict( - { - instance_name: stream_pos - for instance_name, stream_pos in instance_to_max_stream_ordering_map.items() - if stream_pos > min_stream_pos - } - ), - ) - - # Since we fetched the users room list at some point in time after the from/to - # tokens, we need to revert/rewind some membership changes to match the point in - # time of the `to_token`. In particular, we need to make these fixups: - # - # - 1a) Remove rooms that the user joined after the `to_token` - # - 1b) Add back rooms that the user left after the `to_token` - # - 1c) Update room membership events to the point in time of the `to_token` - # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`) - # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`) - # - 4) Figure out which rooms are DM's - - # 1) Fetch membership changes that fall in the range from `to_token` up to - # `membership_snapshot_token` - # - # If our `to_token` is already the same or ahead of the latest room membership - # for the user, we don't need to do any "2)" fix-ups and can just straight-up - # use the room list from the snapshot as a base (nothing has changed) - current_state_delta_membership_changes_after_to_token = [] - if not membership_snapshot_token.is_before_or_eq(to_token.room_key): - current_state_delta_membership_changes_after_to_token = ( - await self.store.get_current_state_delta_membership_changes_for_user( - user_id, - from_key=to_token.room_key, - to_key=membership_snapshot_token, - excluded_room_ids=self.rooms_to_exclude_globally, - ) + sync_room_id_set[room_id] = _RoomMembershipForUser( + room_id=room_id, + event_id=None, + event_pos=left_event_pos, + membership=Membership.LEAVE, + sender=None, + newly_joined=False, + newly_left=True, + is_dm=room_id in dm_room_ids, ) - # 1) Assemble a list of the first membership event after the `to_token` so we can - # step backward to the previous membership that would apply to the from/to - # range. - first_membership_change_by_room_id_after_to_token: Dict[ - str, CurrentStateDeltaMembership - ] = {} - for membership_change in current_state_delta_membership_changes_after_to_token: - # Only set if we haven't already set it - first_membership_change_by_room_id_after_to_token.setdefault( - membership_change.room_id, membership_change - ) + return sync_room_id_set - # 1) Fixup + @trace + async def _get_newly_joined_and_left_rooms( + self, + user_id: str, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Tuple[StrCollection, Mapping[str, PersistedEventPosition]]: + """Fetch the sets of rooms that the user newly joined or left in the + given token range. + + Note: there may be rooms in the newly left rooms where the user was + "state reset" out of the room, and so that room would not be part of the + "current memberships" of the user. 
+ + Returns: + A 2-tuple of newly joined room IDs and a map of newly left room + IDs to the event position the leave happened at. + """ + newly_joined_room_ids: Set[str] = set() + newly_left_room_map: Dict[str, PersistedEventPosition] = {} + + # We need to figure out the # - # Since we fetched a snapshot of the users room list at some point in time after - # the from/to tokens, we need to revert/rewind some membership changes to match - # the point in time of the `to_token`. - for ( - room_id, - first_membership_change_after_to_token, - ) in first_membership_change_by_room_id_after_to_token.items(): - # 1a) Remove rooms that the user joined after the `to_token` - if first_membership_change_after_to_token.prev_event_id is None: - sync_room_id_set.pop(room_id, None) - # 1b) 1c) From the first membership event after the `to_token`, step backward to the - # previous membership that would apply to the from/to range. - else: - # We don't expect these fields to be `None` if we have a `prev_event_id` - # but we're being defensive since it's possible that the prev event was - # culled from the database. - if ( - first_membership_change_after_to_token.prev_event_pos is not None - and first_membership_change_after_to_token.prev_membership - is not None - ): - sync_room_id_set[room_id] = _RoomMembershipForUser( - room_id=room_id, - event_id=first_membership_change_after_to_token.prev_event_id, - event_pos=first_membership_change_after_to_token.prev_event_pos, - membership=first_membership_change_after_to_token.prev_membership, - sender=first_membership_change_after_to_token.prev_sender, - # We will update these fields below to be accurate - newly_joined=False, - newly_left=False, - is_dm=False, - ) - else: - # If we can't find the previous membership event, we shouldn't - # include the room in the sync response since we can't determine the - # exact membership state and shouldn't rely on the current snapshot. - sync_room_id_set.pop(room_id, None) + # - 1) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`) + # - 2) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`) - # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token` + # 1) Fetch membership changes that fall in the range from `from_token` up to `to_token` current_state_delta_membership_changes_in_from_to_range = [] if from_token: current_state_delta_membership_changes_in_from_to_range = ( @@ -613,7 +715,7 @@ class SlidingSyncRoomLists: ) ) - # 2) Assemble a list of the last membership events in some given ranges. Someone + # 1) Assemble a list of the last membership events in some given ranges. Someone # could have left and joined multiple times during the given range but we only # care about end-result so we grab the last one. last_membership_change_by_room_id_in_from_to_range: Dict[ @@ -646,9 +748,9 @@ class SlidingSyncRoomLists: if membership_change.membership != Membership.JOIN: has_non_join_event_by_room_id_in_from_to_range[room_id] = True - # 2) Fixup + # 1) Fixup # - # 3) We also want to assemble a list of possibly newly joined rooms. Someone + # 2) We also want to assemble a list of possibly newly joined rooms. Someone # could have left and joined multiple times during the given range but we only # care about whether they are joined at the end of the token range so we are # working with the last membership even in the token range. 
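The hunks around this point renumber and rework the newly-joined/newly-left classification. As a hedged restatement of the rule being implemented — a simplified model that uses bare membership strings instead of Synapse's `Membership` constants and `CurrentStateDeltaMembership` objects, and returns a set where the real code keeps each leave's event position:

```python
from typing import Dict, List, Optional, Set, Tuple

# Each change: (room_id, membership, prev_membership_before_the_range),
# ordered oldest-to-newest within the from/to token range.
MembershipChange = Tuple[str, str, Optional[str]]


def classify_changes(changes: List[MembershipChange]) -> Tuple[Set[str], Set[str]]:
    first_prev: Dict[str, Optional[str]] = {}
    last_membership: Dict[str, str] = {}
    saw_non_join: Set[str] = set()
    for room_id, membership, prev in changes:
        if room_id not in first_prev:
            first_prev[room_id] = prev
        last_membership[room_id] = membership
        if membership != "join":
            saw_non_join.add(room_id)

    newly_joined: Set[str] = set()
    newly_left: Set[str] = set()
    for room_id, membership in last_membership.items():
        if membership == "leave":
            # Ended the range as left, whatever happened in between.
            newly_left.add(room_id)
        elif membership == "join":
            prev = first_prev[room_id]
            # Newly joined if we churned within the range, joined for the very
            # first time (no previous membership event), or were not joined
            # just before the range started.
            if room_id in saw_non_join or prev is None or prev != "join":
                newly_joined.add(room_id)
    return newly_joined, newly_left
```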
@@ -658,46 +760,18 @@ class SlidingSyncRoomLists: ) in last_membership_change_by_room_id_in_from_to_range.values(): room_id = last_membership_change_in_from_to_range.room_id - # 3) + # 2) if last_membership_change_in_from_to_range.membership == Membership.JOIN: possibly_newly_joined_room_ids.add(room_id) - # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`). + # 1) Figure out newly_left rooms (> `from_token` and <= `to_token`). if last_membership_change_in_from_to_range.membership == Membership.LEAVE: - # 2) Mark this room as `newly_left` + # 1) Mark this room as `newly_left` + newly_left_room_map[room_id] = ( + last_membership_change_in_from_to_range.event_pos + ) - # If we're seeing a membership change here, we should expect to already - # have it in our snapshot but if a state reset happens, it wouldn't have - # shown up in our snapshot but appear as a change here. - existing_sync_entry = sync_room_id_set.get(room_id) - if existing_sync_entry is not None: - # Normal expected case - sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace( - newly_left=True - ) - else: - # State reset! - logger.warn( - "State reset detected for room_id %s with %s who is no longer in the room", - room_id, - user_id, - ) - # Even though a state reset happened which removed the person from - # the room, we still add it the list so the user knows they left the - # room. Downstream code can check for a state reset by looking for - # `event_id=None and membership is not None`. - sync_room_id_set[room_id] = _RoomMembershipForUser( - room_id=room_id, - event_id=last_membership_change_in_from_to_range.event_id, - event_pos=last_membership_change_in_from_to_range.event_pos, - membership=last_membership_change_in_from_to_range.membership, - sender=last_membership_change_in_from_to_range.sender, - newly_joined=False, - newly_left=True, - is_dm=False, - ) - - # 3) Figure out `newly_joined` + # 2) Figure out `newly_joined` for room_id in possibly_newly_joined_room_ids: has_non_join_in_from_to_range = ( has_non_join_event_by_room_id_in_from_to_range.get(room_id, False) @@ -706,9 +780,7 @@ class SlidingSyncRoomLists: # also some non-join in the range, we know they `newly_joined`. if has_non_join_in_from_to_range: # We found a `newly_joined` room (we left and joined within the token range) - sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( - newly_joined=True - ) + newly_joined_room_ids.add(room_id) else: prev_event_id = first_membership_change_by_room_id_in_from_to_range[ room_id @@ -720,20 +792,23 @@ class SlidingSyncRoomLists: if prev_event_id is None: # We found a `newly_joined` room (we are joining the room for the # first time within the token range) - sync_room_id_set[room_id] = sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) + newly_joined_room_ids.add(room_id) # Last resort, we need to step back to the previous membership event # just before the token range to see if we're joined then or not. 
elif prev_membership != Membership.JOIN: # We found a `newly_joined` room (we left before the token range # and joined within the token range) - sync_room_id_set[room_id] = sync_room_id_set[ - room_id - ].copy_and_replace(newly_joined=True) + newly_joined_room_ids.add(room_id) + + return newly_joined_room_ids, newly_left_room_map + + @trace + async def _get_dm_rooms_for_user( + self, + user_id: str, + ) -> StrCollection: + """Get the set of DM rooms for the user.""" - # 4) Figure out which rooms the user considers to be direct-message (DM) rooms - # # We're using global account data (`m.direct`) instead of checking for # `is_direct` on membership events because that property only appears for # the invitee membership event (doesn't show up for the inviter). @@ -755,13 +830,7 @@ class SlidingSyncRoomLists: if isinstance(room_id, str): dm_room_id_set.add(room_id) - # 4) Fixup - for room_id in sync_room_id_set: - sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace( - is_dm=room_id in dm_room_id_set - ) - - return sync_room_id_set + return dm_room_id_set @trace async def filter_rooms_relevant_for_sync( From 709b7363fe13705f37389cce3ab21f0285f359c8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 1 Sep 2024 11:25:39 +0100 Subject: [PATCH 194/332] Sliding sync: use new DB tables (#17630) Based on https://github.com/element-hq/synapse/pull/17629 Utilizing the new sliding sync tables added in https://github.com/element-hq/synapse/pull/17512 for fast acquisition of rooms for the user and filtering/sorting. --------- Co-authored-by: Eric Eastwood --- changelog.d/17630.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 562 +++++++++++++++++- synapse/storage/_base.py | 4 + synapse/storage/background_updates.py | 21 +- synapse/storage/databases/main/cache.py | 4 + .../databases/main/events_bg_updates.py | 11 + synapse/storage/databases/main/roommember.py | 55 +- synapse/storage/roommember.py | 13 + .../sliding_sync/test_connection_tracking.py | 16 +- .../test_extension_account_data.py | 16 + .../sliding_sync/test_extension_e2ee.py | 16 + .../sliding_sync/test_extension_receipts.py | 16 + .../sliding_sync/test_extension_to_device.py | 15 + .../sliding_sync/test_extension_typing.py | 16 + .../client/sliding_sync/test_extensions.py | 16 +- .../sliding_sync/test_room_subscriptions.py | 16 + .../client/sliding_sync/test_rooms_invites.py | 16 + .../client/sliding_sync/test_rooms_meta.py | 16 + .../sliding_sync/test_rooms_required_state.py | 16 +- .../sliding_sync/test_rooms_timeline.py | 16 + .../client/sliding_sync/test_sliding_sync.py | 33 + 21 files changed, 877 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17630.misc diff --git a/changelog.d/17630.misc b/changelog.d/17630.misc new file mode 100644 index 0000000000..ed1bf6bd55 --- /dev/null +++ b/changelog.d/17630.misc @@ -0,0 +1 @@ +Use new database tables for sliding sync. 
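
The heart of the change below is a dual-path dispatch in `compute_interested_rooms`: once the background updates that populate `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` have finished, the handler takes a fast path that reads those tables directly, and until then it falls back to the older per-event computation. A minimal runnable sketch of that gate (with hypothetical `Store`/`Handler` stubs; the real methods take the full sync config and stream tokens):

    import asyncio

    class Store:
        async def have_finished_sliding_sync_background_jobs(self) -> bool:
            # Synapse consults the background_updates table here; the sketch
            # just hardcodes the answer.
            return True

    class Handler:
        def __init__(self, store: Store) -> None:
            self.store = store

        async def compute_interested_rooms(self) -> str:
            # Prefer the new sliding sync tables once they are fully populated...
            if await self.store.have_finished_sliding_sync_background_jobs():
                return await self._compute_interested_rooms_new_tables()
            # ...otherwise fall back to the slower event-stream computation.
            return await self._compute_interested_rooms_fallback()

        async def _compute_interested_rooms_new_tables(self) -> str:
            return "new tables"

        async def _compute_interested_rooms_fallback(self) -> str:
            return "fallback"

    print(asyncio.run(Handler(Store()).compute_interested_rooms()))  # -> new tables
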
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index f41180feda..12b7958c6f 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -18,6 +18,7 @@ import logging from itertools import chain from typing import ( TYPE_CHECKING, + AbstractSet, Any, Dict, List, @@ -47,7 +48,7 @@ from synapse.storage.databases.main.state import ( Sentinel as StateSentinel, ) from synapse.storage.databases.main.stream import CurrentStateDeltaMembership -from synapse.storage.roommember import RoomsForUser +from synapse.storage.roommember import RoomsForUser, RoomsForUserSlidingSync from synapse.types import ( MutableStateMap, PersistedEventPosition, @@ -143,7 +144,10 @@ class _RoomMembershipForUser: def filter_membership_for_sync( - *, user_id: str, room_membership_for_user: _RoomMembershipForUser + *, + user_id: str, + room_membership_for_user: Union[_RoomMembershipForUser, RoomsForUserSlidingSync], + newly_left: bool, ) -> bool: """ Returns True if the membership event should be included in the sync response, @@ -156,7 +160,6 @@ def filter_membership_for_sync( membership = room_membership_for_user.membership sender = room_membership_for_user.sender - newly_left = room_membership_for_user.newly_left # We want to allow everything except rooms the user has left unless `newly_left` # because we want everything that's *still* relevant to the user. We include @@ -198,6 +201,310 @@ class SlidingSyncRoomLists: ) -> SlidingSyncInterestedRooms: """Fetch the set of rooms that match the request""" + if await self.store.have_finished_sliding_sync_background_jobs(): + return await self._compute_interested_rooms_new_tables( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + to_token=to_token, + from_token=from_token, + ) + else: + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + return await self._compute_interested_rooms_fallback( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + to_token=to_token, + from_token=from_token, + ) + + @trace + async def _compute_interested_rooms_new_tables( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> SlidingSyncInterestedRooms: + """Implementation of `compute_interested_rooms` using new sliding sync db tables.""" + user_id = sync_config.user.to_string() + + # Assemble sliding window lists + lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} + # Keep track of the rooms that we can display and need to fetch more info about + relevant_room_map: Dict[str, RoomSyncConfig] = {} + # The set of room IDs of all rooms that could appear in any list. These + # include rooms that are outside the list ranges. 
+ all_rooms: Set[str] = set() + + room_membership_for_user_map = await self.store.get_sliding_sync_rooms_for_user( + user_id + ) + + changes = await self._get_rewind_changes_to_current_membership_to_token( + sync_config.user, room_membership_for_user_map, to_token=to_token + ) + if changes: + room_membership_for_user_map = dict(room_membership_for_user_map) + for room_id, change in changes.items(): + if change is None: + # Remove rooms that the user joined after the `to_token` + room_membership_for_user_map.pop(room_id) + continue + + existing_room = room_membership_for_user_map.get(room_id) + if existing_room is not None: + # Update room membership events to the point in time of the `to_token` + room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( + room_id=room_id, + sender=change.sender, + membership=change.membership, + event_id=change.event_id, + event_pos=change.event_pos, + room_version_id=change.room_version_id, + # We keep the current state of the room though + room_type=existing_room.room_type, + is_encrypted=existing_room.is_encrypted, + ) + else: + # This can happen if we get "state reset" out of the room + # after the `to_token`. In other words, there is no membership + # for the room after the `to_token` but we see membership in + # the token range. + + # Get the state at the time. Note that room type never changes, + # so we can just get current room type + room_type = await self.store.get_room_type(room_id) + is_encrypted = await self.get_is_encrypted_for_room_at_token( + room_id, to_token.room_key + ) + + # Add back rooms that the user was state-reset out of after `to_token` + room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( + room_id=room_id, + sender=change.sender, + membership=change.membership, + event_id=change.event_id, + event_pos=change.event_pos, + room_version_id=change.room_version_id, + room_type=room_type, + is_encrypted=is_encrypted, + ) + + newly_joined_room_ids, newly_left_room_map = ( + await self._get_newly_joined_and_left_rooms( + user_id, from_token=from_token, to_token=to_token + ) + ) + dm_room_ids = await self._get_dm_rooms_for_user(user_id) + + # Handle state resets in the from -> to token range. + state_reset_rooms = ( + newly_left_room_map.keys() - room_membership_for_user_map.keys() + ) + if state_reset_rooms: + room_membership_for_user_map = dict(room_membership_for_user_map) + for room_id in ( + newly_left_room_map.keys() - room_membership_for_user_map.keys() + ): + # Get the state at the time. 
Note that room type never changes, + # so we can just get current room type + room_type = await self.store.get_room_type(room_id) + is_encrypted = await self.get_is_encrypted_for_room_at_token( + room_id, newly_left_room_map[room_id].to_room_stream_token() + ) + + room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( + room_id=room_id, + sender=None, + membership=Membership.LEAVE, + event_id=None, + event_pos=newly_left_room_map[room_id], + room_version_id=await self.store.get_room_version_id(room_id), + room_type=room_type, + is_encrypted=is_encrypted, + ) + + if sync_config.lists: + sync_room_map = { + room_id: room_membership_for_user + for room_id, room_membership_for_user in room_membership_for_user_map.items() + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_membership_for_user, + newly_left=room_id in newly_left_room_map, + ) + } + with start_active_span("assemble_sliding_window_lists"): + for list_key, list_config in sync_config.lists.items(): + # Apply filters + filtered_sync_room_map = sync_room_map + if list_config.filters is not None: + filtered_sync_room_map = await self.filter_rooms_using_tables( + user_id, + sync_room_map, + list_config.filters, + to_token, + dm_room_ids, + ) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_room_map = ( + await self.store.is_partial_state_room_batched( + filtered_sync_room_map.keys() + ) + ) + + # Since creating the `RoomSyncConfig` takes some work, let's just do it + # once. + room_sync_config = RoomSyncConfig.from_room_config(list_config) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + filtered_sync_room_map = { + room_id: room + for room_id, room in filtered_sync_room_map.items() + if not partial_state_room_map.get(room_id) + } + + all_rooms.update(filtered_sync_room_map) + + # Sort the list + sorted_room_info = await self.sort_rooms_using_tables( + filtered_sync_room_map, to_token + ) + + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + if list_config.ranges: + for range in list_config.ranges: + room_ids_in_list: List[str] = [] + + # We're going to loop through the sorted list of rooms starting + # at the range start index and keep adding rooms until we fill + # up the range or run out of rooms. + # + # Both sides of range are inclusive so we `+ 1` + max_num_rooms = range[1] - range[0] + 1 + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + + if len(room_ids_in_list) >= max_num_rooms: + break + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going + # to display and need to fetch more info about. 
+ existing_room_sync_config = relevant_room_map.get( + room_id + ) + if existing_room_sync_config is not None: + room_sync_config = existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + + relevant_room_map[room_id] = room_sync_config + + room_ids_in_list.append(room_id) + + ops.append( + SlidingSyncResult.SlidingWindowList.Operation( + op=OperationType.SYNC, + range=range, + room_ids=room_ids_in_list, + ) + ) + + lists[list_key] = SlidingSyncResult.SlidingWindowList( + count=len(sorted_room_info), + ops=ops, + ) + + if sync_config.room_subscriptions: + with start_active_span("assemble_room_subscriptions"): + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_room_map = await self.store.is_partial_state_room_batched( + sync_config.room_subscriptions.keys() + ) + + for ( + room_id, + room_subscription, + ) in sync_config.room_subscriptions.items(): + if room_id not in room_membership_for_user_map: + # TODO: Handle rooms the user isn't in. + continue + + all_rooms.add(room_id) + + # Take the superset of the `RoomSyncConfig` for each room. + room_sync_config = RoomSyncConfig.from_room_config( + room_subscription + ) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + if partial_state_room_map.get(room_id): + continue + + all_rooms.add(room_id) + + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + room_sync_config = ( + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + ) + + relevant_room_map[room_id] = room_sync_config + + # Filtered subset of `relevant_room_map` for rooms that may have updates + # (in the event stream) + relevant_rooms_to_send_map = await self._filter_relevant_room_to_send( + previous_connection_state, from_token, relevant_room_map + ) + + return SlidingSyncInterestedRooms( + lists=lists, + relevant_room_map=relevant_room_map, + relevant_rooms_to_send_map=relevant_rooms_to_send_map, + all_rooms=all_rooms, + room_membership_for_user_map={ + # FIXME: Ideally we wouldn't have to create a new + # `_RoomMembershipForUser` here and instead just return + # `newly_joined_room_ids` directly, to save CPU time. + room_id: _RoomMembershipForUser( + room_id=room_id, + event_id=membership_info.event_id, + event_pos=membership_info.event_pos, + sender=membership_info.sender, + membership=membership_info.membership, + newly_joined=room_id in newly_joined_room_ids, + newly_left=room_id in newly_left_room_map, + is_dm=room_id in dm_room_ids, + ) + for room_id, membership_info in room_membership_for_user_map.items() + }, + ) + + async def _compute_interested_rooms_fallback( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> SlidingSyncInterestedRooms: + """Fallback code when the database background updates haven't completed yet.""" + room_membership_for_user_map = ( await self.get_room_membership_for_user_at_to_token( sync_config.user, to_token, from_token @@ -239,7 +546,7 @@ class SlidingSyncRoomLists: ) # Since creating the `RoomSyncConfig` takes some work, let's just do it - # once and make a copy whenever we need it. + # once. 
 room_sync_config = RoomSyncConfig.from_room_config(list_config)
 
 # Exclude partially-stated rooms if we must wait for the room to be
@@ -359,6 +666,29 @@ class SlidingSyncRoomLists:
 
         relevant_room_map[room_id] = room_sync_config
 
+        # Filtered subset of `relevant_room_map` for rooms that may have updates
+        # (in the event stream)
+        relevant_rooms_to_send_map = await self._filter_relevant_room_to_send(
+            previous_connection_state, from_token, relevant_room_map
+        )
+
+        return SlidingSyncInterestedRooms(
+            lists=lists,
+            relevant_room_map=relevant_room_map,
+            relevant_rooms_to_send_map=relevant_rooms_to_send_map,
+            all_rooms=all_rooms,
+            room_membership_for_user_map=room_membership_for_user_map,
+        )
+
+    async def _filter_relevant_room_to_send(
+        self,
+        previous_connection_state: PerConnectionState,
+        from_token: Optional[StreamToken],
+        relevant_room_map: Dict[str, RoomSyncConfig],
+    ) -> Dict[str, RoomSyncConfig]:
+        """Filters the `relevant_room_map` down to those rooms that may have
+        updates we need to fetch and return."""
+
 # Filtered subset of `relevant_room_map` for rooms that may have updates
 # (in the event stream)
 relevant_rooms_to_send_map: Dict[str, RoomSyncConfig] = relevant_room_map
@@ -418,19 +748,13 @@ class SlidingSyncRoomLists:
 if room_id in rooms_should_send
 }
 
-        return SlidingSyncInterestedRooms(
-            lists=lists,
-            relevant_room_map=relevant_room_map,
-            relevant_rooms_to_send_map=relevant_rooms_to_send_map,
-            all_rooms=all_rooms,
-            room_membership_for_user_map=room_membership_for_user_map,
-        )
+        return relevant_rooms_to_send_map
 
 @trace
 async def _get_rewind_changes_to_current_membership_to_token(
 self,
 user: UserID,
-        rooms_for_user: Mapping[str, RoomsForUser],
+        rooms_for_user: Mapping[str, Union[RoomsForUser, RoomsForUserSlidingSync]],
 to_token: StreamToken,
 ) -> Mapping[str, Optional[RoomsForUser]]:
 """
@@ -806,7 +1130,7 @@ class SlidingSyncRoomLists:
 async def _get_dm_rooms_for_user(
 self,
 user_id: str,
-    ) -> StrCollection:
+    ) -> AbstractSet[str]:
 """Get the set of DM rooms for the user."""
 
@@ -872,6 +1196,7 @@ class SlidingSyncRoomLists:
 if filter_membership_for_sync(
 user_id=user_id,
 room_membership_for_user=room_membership_for_user,
+                newly_left=room_membership_for_user.newly_left,
 )
 }
 
@@ -1364,6 +1689,174 @@ class SlidingSyncRoomLists:
 # Assemble a new sync room map but only with the `filtered_room_id_set`
 return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set}
 
+    @trace
+    async def filter_rooms_using_tables(
+        self,
+        user_id: str,
+        sync_room_map: Mapping[str, RoomsForUserSlidingSync],
+        filters: SlidingSyncConfig.SlidingSyncList.Filters,
+        to_token: StreamToken,
+        dm_room_ids: AbstractSet[str],
+    ) -> Dict[str, RoomsForUserSlidingSync]:
+        """
+        Filter rooms based on the sync request.
+
+        Args:
+            user_id: The user ID to filter rooms for
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            filters: Filters to apply
+            to_token: We filter based on the state of the room at this token
+            dm_room_ids: Set of room IDs which are DMs
+
+        Returns:
+            A filtered dictionary of room IDs along with membership information in the
+            room at the time of `to_token`.
+ """ + + filtered_room_id_set = set(sync_room_map.keys()) + + # Filter for Direct-Message (DM) rooms + if filters.is_dm is not None: + with start_active_span("filters.is_dm"): + if filters.is_dm: + # Intersect with the DM room set + filtered_room_id_set &= dm_room_ids + else: + # Remove DMs + filtered_room_id_set -= dm_room_ids + + if filters.spaces is not None: + with start_active_span("filters.spaces"): + raise NotImplementedError() + + # Filter for encrypted rooms + if filters.is_encrypted is not None: + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if sync_room_map[room_id].is_encrypted == filters.is_encrypted + } + + # Filter for rooms that the user has been invited to + if filters.is_invite is not None: + with start_active_span("filters.is_invite"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_for_user = sync_room_map[room_id] + # If we're looking for invite rooms, filter out rooms that the user is + # not invited to and vice versa + if ( + filters.is_invite + and room_for_user.membership != Membership.INVITE + ) or ( + not filters.is_invite + and room_for_user.membership == Membership.INVITE + ): + filtered_room_id_set.remove(room_id) + + # Filter by room type (space vs room, etc). A room must match one of the types + # provided in the list. `None` is a valid type for rooms which do not have a + # room type. + if filters.room_types is not None or filters.not_room_types is not None: + with start_active_span("filters.room_types"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_type = sync_room_map[room_id].room_type + + if ( + filters.room_types is not None + and room_type not in filters.room_types + ): + filtered_room_id_set.remove(room_id) + + if ( + filters.not_room_types is not None + and room_type in filters.not_room_types + ): + filtered_room_id_set.remove(room_id) + + if filters.room_name_like is not None: + with start_active_span("filters.room_name_like"): + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. + # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) + raise NotImplementedError() + + if filters.tags is not None or filters.not_tags is not None: + with start_active_span("filters.tags"): + raise NotImplementedError() + + # Assemble a new sync room map but only with the `filtered_room_id_set` + return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} + + @trace + async def sort_rooms_using_tables( + self, + sync_room_map: Mapping[str, RoomsForUserSlidingSync], + to_token: StreamToken, + ) -> List[RoomsForUserSlidingSync]: + """ + Sort by `stream_ordering` of the last event that the user should see in the + room. `stream_ordering` is unique so we get a stable sort. + + Args: + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. 
+            to_token: We sort based on the events in the room at this token (<= `to_token`)
+
+        Returns:
+            A sorted list of room IDs by `stream_ordering` along with membership information.
+        """
+
+        # Assemble a map of room ID to the `stream_ordering` of the last activity that the
+        # user should see in the room (<= `to_token`)
+        last_activity_in_room_map: Dict[str, int] = {}
+
+        for room_id, room_for_user in sync_room_map.items():
+            if room_for_user.membership != Membership.JOIN:
+                # If the user has left/been invited/knocked/been banned from a
+                # room, they shouldn't see anything past that point.
+                #
+                # FIXME: It's possible that people should see beyond this point
+                # in invited/knocked cases if for example the room has
+                # `invite`/`world_readable` history visibility, see
+                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+                last_activity_in_room_map[room_id] = room_for_user.event_pos.stream
+
+        # For fully-joined rooms, we find the latest activity at/before the
+        # `to_token`.
+        joined_room_positions = (
+            await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering(
+                [
+                    room_id
+                    for room_id, room_for_user in sync_room_map.items()
+                    if room_for_user.membership == Membership.JOIN
+                ],
+                to_token.room_key,
+            )
+        )
+
+        last_activity_in_room_map.update(joined_room_positions)
+
+        return sorted(
+            sync_room_map.values(),
+            # Sort by the last activity (stream_ordering) in the room
+            key=lambda room_info: last_activity_in_room_map[room_info.room_id],
+            # We want descending order
+            reverse=True,
+        )
+
 @trace
 async def sort_rooms(
 self,
@@ -1420,3 +1913,46 @@ class SlidingSyncRoomLists:
 # We want descending order
 reverse=True,
 )
+
+    async def get_is_encrypted_for_room_at_token(
+        self, room_id: str, to_token: RoomStreamToken
+    ) -> bool:
+        """Return whether the room was encrypted at the given token."""
+
+        # Fetch the current encryption state
+        state_ids = await self.store.get_partial_filtered_current_state_ids(
+            room_id, StateFilter.from_types([(EventTypes.RoomEncryption, "")])
+        )
+        encryption_event_id = state_ids.get((EventTypes.RoomEncryption, ""))
+
+        # Now roll back the state by looking at the state deltas between
+        # to_token and now.
+        deltas = await self.store.get_current_state_deltas_for_room(
+            room_id,
+            from_token=to_token,
+            to_token=self.store.get_room_max_token(),
+        )
+
+        for delta in deltas:
+            if delta.event_type != EventTypes.RoomEncryption:
+                continue
+
+            # Found the first change; we look at the previous event ID to get
+            # the state at the `to_token`.
+
+            if delta.prev_event_id is None:
+                # There is no prev event, so no encryption state event, so the
+                # room is not encrypted
+                return False
+
+            encryption_event_id = delta.prev_event_id
+            break
+
+        # We didn't find any encryption state, so the room isn't encrypted
+        if encryption_event_id is None:
+            return False
+
+        # We found an encryption state event; check whether its content has a
+        # non-null algorithm
+        encrypted_event = await self.store.get_event(encryption_event_id)
+        algorithm = encrypted_event.content.get(EventContentFields.ENCRYPTION_ALGORITHM)
+
+        return algorithm is not None
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 1ac85ad66d..d22160b85c 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -126,6 +126,9 @@ class SQLBaseStore(metaclass=ABCMeta):
 self._attempt_to_invalidate_cache(
 "_get_rooms_for_local_user_where_membership_is_inner", (user_id,)
 )
+        self._attempt_to_invalidate_cache(
+            "get_sliding_sync_rooms_for_user", (user_id,)
+        )
 
 # Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) @@ -160,6 +163,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_room_type", (room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) def _attempt_to_invalidate_cache( self, cache_name: str, key: Optional[Collection[Any]] diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index f473294070..efe4238036 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -44,7 +44,7 @@ from synapse._pydantic_compat import HAS_PYDANTIC_V2 from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection from synapse.util import Clock, json_encoder from . import engines @@ -487,6 +487,25 @@ class BackgroundUpdater: return not update_exists + async def have_completed_background_updates( + self, update_names: StrCollection + ) -> bool: + """Return the name of background updates that have not yet been + completed""" + if self._all_done: + return True + + rows = await self.db_pool.simple_select_many_batch( + table="background_updates", + column="update_name", + iterable=update_names, + retcols=("update_name",), + desc="get_uncompleted_background_updates", + ) + + # If we find any rows then we've not completed the update. + return not bool(rows) + async def do_next_background_update(self, sleep: bool = True) -> bool: """Does some amount of work on the next queued background update diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 246d2acc2f..b0e30daee5 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -346,6 +346,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "_get_rooms_for_local_user_where_membership_is_inner", (state_key,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user", (state_key,) + ) self._attempt_to_invalidate_cache( "did_forget", @@ -417,6 +420,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "_get_rooms_for_local_user_where_membership_is_inner", None ) + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("get_references_for_event", None) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index e819364a16..b227e05773 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -2342,6 +2342,17 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS return len(memberships_to_update_rows) + async def have_finished_sliding_sync_background_jobs(self) -> bool: + """Return if its safe to use the sliding sync membership tables.""" + + return await self.db_pool.updates.have_completed_background_updates( + ( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + 
_BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + ) + ) + def _resolve_stale_data_in_sliding_sync_tables( txn: LoggingTransaction, diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 722686d4b8..57b9b95c28 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -53,7 +53,12 @@ from synapse.storage.database import ( from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine -from synapse.storage.roommember import MemberSummary, ProfileInfo, RoomsForUser +from synapse.storage.roommember import ( + MemberSummary, + ProfileInfo, + RoomsForUser, + RoomsForUserSlidingSync, +) from synapse.types import ( JsonDict, PersistedEventPosition, @@ -1377,6 +1382,54 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): desc="room_forgetter_stream_pos", ) + @cached(iterable=True, max_entries=10000) + async def get_sliding_sync_rooms_for_user( + self, + user_id: str, + ) -> Mapping[str, RoomsForUserSlidingSync]: + """Get all the rooms for a user to handle a sliding sync request. + + Ignores forgotten rooms and rooms that the user has been kicked from. + + Returns: + Map from room ID to membership info + """ + + def get_sliding_sync_rooms_for_user_txn( + txn: LoggingTransaction, + ) -> Dict[str, RoomsForUserSlidingSync]: + sql = """ + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + COALESCE(j.room_type, m.room_type), + COALESCE(j.is_encrypted, m.is_encrypted) + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') + WHERE user_id = ? 
+ AND m.forgotten = 0 + """ + txn.execute(sql, (user_id,)) + return { + row[0]: RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + room_type=row[7], + is_encrypted=row[8], + ) + for row in txn + } + + return await self.db_pool.runInteraction( + "get_sliding_sync_rooms_for_user", + get_sliding_sync_rooms_for_user_txn, + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 80c9630867..09213627ec 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -39,6 +39,19 @@ class RoomsForUser: room_version_id: str +@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) +class RoomsForUserSlidingSync: + room_id: str + sender: Optional[str] + membership: str + event_id: Optional[str] + event_pos: PersistedEventPosition + room_version_id: str + + room_type: Optional[str] + is_encrypted: bool + + @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class GetRoomsForUserWithStreamOrdering: room_id: str diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index 6863c32f7c..436bd4466c 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -13,7 +13,7 @@ # import logging -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor @@ -28,6 +28,18 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): """ Test connection tracking in the Sliding Sync API. @@ -44,6 +56,8 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_rooms_required_state_incremental_sync_LIVE(self) -> None: """Test that we only get state updates in incremental sync for rooms we've already seen (LIVE). 
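
Each sliding sync test module below gains the same decorator: `parameterized_class` stamps out one copy of the test class per parameter tuple, injects `use_new_tables` as a class attribute, and names the generated classes via `class_name_func`, so every suite runs once against the new tables and once against the fallback path. A self-contained illustration of the pattern (`DemoTestCase` is hypothetical, not one of the Synapse tests):

    import unittest

    from parameterized import parameterized_class

    @parameterized_class(
        ("use_new_tables",),
        [
            (True,),
            (False,),
        ],
        class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
    )
    class DemoTestCase(unittest.TestCase):
        use_new_tables: bool  # set per generated class by the decorator

        def test_flag_is_bool(self) -> None:
            self.assertIsInstance(self.use_new_tables, bool)

    if __name__ == "__main__":
        unittest.main()  # runs DemoTestCase_new and DemoTestCase_fallback
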
diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py index 3482a5f887..65a6adf4af 100644 --- a/tests/rest/client/sliding_sync/test_extension_account_data.py +++ b/tests/rest/client/sliding_sync/test_extension_account_data.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): """Tests for the account_data sliding sync extension""" @@ -43,6 +57,8 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.account_data_handler = hs.get_account_data_handler() + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling the account_data extension works during an intitial sync, diff --git a/tests/rest/client/sliding_sync/test_extension_e2ee.py b/tests/rest/client/sliding_sync/test_extension_e2ee.py index 320f8c788f..2ff6687796 100644 --- a/tests/rest/client/sliding_sync/test_extension_e2ee.py +++ b/tests/rest/client/sliding_sync/test_extension_e2ee.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -27,6 +29,18 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): """Tests for the e2ee sliding sync extension""" @@ -42,6 +56,8 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.e2e_keys_handler = hs.get_e2e_keys_handler() + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling e2ee extension works during an intitial sync, even if there diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py index e842349ed2..90b035dd75 100644 --- a/tests/rest/client/sliding_sync/test_extension_receipts.py +++ b/tests/rest/client/sliding_sync/test_extension_receipts.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground 
update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): """Tests for the receipts sliding sync extension""" @@ -42,6 +56,8 @@ class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling the receipts extension works during an intitial sync, diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py index f8500812ea..5ba2443089 100644 --- a/tests/rest/client/sliding_sync/test_extension_to_device.py +++ b/tests/rest/client/sliding_sync/test_extension_to_device.py @@ -14,6 +14,8 @@ import logging from typing import List +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" @@ -40,6 +54,7 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + super().prepare(reactor, clock, hs) def _assert_to_device_response( self, response_body: JsonDict, expected_messages: List[JsonDict] diff --git a/tests/rest/client/sliding_sync/test_extension_typing.py b/tests/rest/client/sliding_sync/test_extension_typing.py index 7f523e0f10..0a0f5aff1a 100644 --- a/tests/rest/client/sliding_sync/test_extension_typing.py +++ b/tests/rest/client/sliding_sync/test_extension_typing.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncTypingExtensionTestCase(SlidingSyncBase): """Tests for the typing notification sliding sync extension""" @@ -41,6 +55,8 @@ class SlidingSyncTypingExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: 
HomeServer) -> None: self.store = hs.get_datastores().main + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling the typing extension works during an intitial sync, diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py index ae823d5415..32478467aa 100644 --- a/tests/rest/client/sliding_sync/test_extensions.py +++ b/tests/rest/client/sliding_sync/test_extensions.py @@ -14,7 +14,7 @@ import logging from typing import Literal -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -30,6 +30,18 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncExtensionsTestCase(SlidingSyncBase): """ Test general extensions behavior in the Sliding Sync API. Each extension has their @@ -49,6 +61,8 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase): self.storage_controllers = hs.get_storage_controllers() self.account_data_handler = hs.get_account_data_handler() + super().prepare(reactor, clock, hs) + # Any extensions that use `lists`/`rooms` should be tested here @parameterized.expand([("account_data",), ("receipts",), ("typing",)]) def test_extensions_lists_rooms_relevant_rooms( diff --git a/tests/rest/client/sliding_sync/test_room_subscriptions.py b/tests/rest/client/sliding_sync/test_room_subscriptions.py index cc17b0b354..e81d251839 100644 --- a/tests/rest/client/sliding_sync/test_room_subscriptions.py +++ b/tests/rest/client/sliding_sync/test_room_subscriptions.py @@ -14,6 +14,8 @@ import logging from http import HTTPStatus +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -27,6 +29,18 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): """ Test `room_subscriptions` in the Sliding Sync API. 
@@ -43,6 +57,8 @@ class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_room_subscriptions_with_join_membership(self) -> None: """ Test `room_subscriptions` with a joined room should give us timeline and current diff --git a/tests/rest/client/sliding_sync/test_rooms_invites.py b/tests/rest/client/sliding_sync/test_rooms_invites.py index f08ffaf674..f6f45c2500 100644 --- a/tests/rest/client/sliding_sync/test_rooms_invites.py +++ b/tests/rest/client/sliding_sync/test_rooms_invites.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -27,6 +29,18 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase): """ Test to make sure the `rooms` response looks good for invites in the Sliding Sync API. @@ -49,6 +63,8 @@ class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_rooms_invite_shared_history_initial_sync(self) -> None: """ Test that `rooms` we are invited to have some stripped `invite_state` during an diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 690912133a..71542923da 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): """ Test rooms meta info like name, avatar, joined_count, invited_count, is_dm, @@ -49,6 +63,8 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): assert persistence is not None self.persistence = persistence + super().prepare(reactor, clock, hs) + def test_rooms_meta_when_joined(self) -> None: """ Test that the `rooms` `name` and `avatar` are included in the response and diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 498c921cbd..436ae684da 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ 
b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -13,7 +13,7 @@ # import logging -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor @@ -30,6 +30,18 @@ from tests.test_utils.event_injection import mark_event_as_partial_state logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): """ Test `rooms.required_state` in the Sliding Sync API. @@ -46,6 +58,8 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_rooms_no_required_state(self) -> None: """ Empty `rooms.required_state` should not return any state events in the room diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index eeac0d6aa9..e56fb58012 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -14,6 +14,8 @@ import logging from typing import List, Optional +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,18 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): """ Test `rooms.timeline` in the Sliding Sync API. 
@@ -44,6 +58,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def _assertListEqual( self, actual_items: StrSequence, diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index cb7638c5ba..1dcc15b082 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -13,7 +13,9 @@ # import logging from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple +from unittest.mock import AsyncMock +from parameterized import parameterized_class from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -47,8 +49,25 @@ logger = logging.getLogger(__name__) class SlidingSyncBase(unittest.HomeserverTestCase): """Base class for sliding sync test cases""" + # Flag as to whether to use the new sliding sync tables or not + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + use_new_tables: bool = True + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + hs.get_datastores().main.have_finished_sliding_sync_background_jobs = AsyncMock( # type: ignore[method-assign] + return_value=self.use_new_tables + ) + def default_config(self) -> JsonDict: config = super().default_config() # Enable sliding sync @@ -203,6 +222,18 @@ class SlidingSyncBase(unittest.HomeserverTestCase): ) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncTestCase(SlidingSyncBase): """ Tests regarding MSC3575 Sliding Sync `/sync` endpoint. @@ -226,6 +257,8 @@ class SlidingSyncTestCase(SlidingSyncBase): self.storage_controllers = hs.get_storage_controllers() self.account_data_handler = hs.get_account_data_handler() + super().prepare(reactor, clock, hs) + def _add_new_dm_to_global_account_data( self, source_user_id: str, target_user_id: str, target_room_id: str ) -> None: From 7d52ce7d4b837fb5bdb43734540b22ed35c11038 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 2 Sep 2024 13:39:04 +0200 Subject: [PATCH 195/332] Format files with Ruff (#17643) I thought ruff check would also format, but it doesn't. This runs ruff format in CI and dev scripts. The first commit is just a run of `ruff format .` in the root directory. 
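
In short, `ruff check` is the linter (`--fix` only repairs lint violations), while `ruff format` is Ruff's separate Black-compatible formatter, so both have to run. The invocations this patch wires into CI and `scripts-dev/lint.sh` amount to:

    ruff check --fix .     # lint and auto-fix lint violations; does not reformat
    ruff format .          # rewrite files in place with the formatter
    ruff format --check .  # CI mode: exit non-zero if any file would be reformatted
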
--- .github/workflows/fix_lint.yaml | 6 ++- .github/workflows/tests.yml | 5 +- changelog.d/17643.misc | 1 + contrib/cmdclient/console.py | 3 +- scripts-dev/check_pydantic_models.py | 1 + scripts-dev/lint.sh | 3 ++ scripts-dev/release.py | 3 +- stubs/txredisapi.pyi | 4 +- synapse/__init__.py | 3 +- synapse/_scripts/generate_workers_map.py | 4 +- synapse/_scripts/review_recent_signups.py | 7 ++- synapse/_scripts/synapse_port_db.py | 26 +++++----- synapse/api/urls.py | 3 +- synapse/appservice/scheduler.py | 1 + synapse/config/key.py | 11 ++-- synapse/config/logger.py | 7 +-- synapse/config/server.py | 14 ++---- synapse/config/workers.py | 9 ++-- synapse/event_auth.py | 3 +- synapse/events/presence_router.py | 2 +- synapse/events/snapshot.py | 6 +-- synapse/federation/__init__.py | 3 +- synapse/federation/persistence.py | 2 +- .../federation/transport/server/federation.py | 1 - synapse/federation/units.py | 2 +- synapse/handlers/account.py | 8 +-- synapse/handlers/admin.py | 17 ++++--- synapse/handlers/auth.py | 3 +- synapse/handlers/directory.py | 14 +++--- synapse/handlers/federation.py | 10 ++-- synapse/handlers/identity.py | 1 + synapse/handlers/message.py | 7 ++- synapse/handlers/pagination.py | 38 +++++++------- synapse/handlers/presence.py | 13 ++--- synapse/handlers/profile.py | 6 +-- synapse/handlers/relations.py | 14 +++--- synapse/handlers/room.py | 9 ++-- synapse/handlers/room_member.py | 16 +++--- synapse/handlers/search.py | 6 +-- synapse/handlers/sliding_sync/__init__.py | 4 +- synapse/handlers/sliding_sync/extensions.py | 12 ++--- synapse/handlers/sliding_sync/room_lists.py | 18 ++++--- synapse/handlers/sync.py | 50 +++++++++---------- synapse/handlers/worker_lock.py | 2 +- synapse/http/client.py | 3 +- synapse/http/server.py | 2 +- synapse/logging/_terse_json.py | 1 + synapse/logging/context.py | 5 +- synapse/logging/opentracing.py | 12 +++-- synapse/metrics/background_process_metrics.py | 2 +- synapse/push/bulk_push_rule_evaluator.py | 9 ++-- synapse/replication/http/federation.py | 4 +- synapse/replication/http/push.py | 5 +- synapse/replication/tcp/client.py | 4 +- synapse/replication/tcp/commands.py | 1 + synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/protocol.py | 1 + synapse/replication/tcp/resource.py | 5 +- synapse/replication/tcp/streams/_base.py | 2 +- synapse/rest/admin/registration_tokens.py | 3 +- synapse/rest/client/_base.py | 4 +- synapse/rest/client/account_data.py | 6 +-- synapse/rest/client/account_validity.py | 4 +- synapse/rest/client/events.py | 1 + synapse/rest/client/presence.py | 4 +- synapse/rest/client/profile.py | 2 +- synapse/rest/client/register.py | 18 +++---- synapse/rest/client/room.py | 3 +- synapse/rest/client/sync.py | 12 ++--- synapse/rest/client/transactions.py | 1 + synapse/rest/key/v2/remote_key_resource.py | 8 +-- synapse/rest/well_known.py | 6 +-- .../resource_limits_server_notices.py | 4 +- synapse/storage/controllers/persist_events.py | 8 +-- synapse/storage/databases/main/client_ips.py | 8 +-- synapse/storage/databases/main/deviceinbox.py | 2 +- synapse/storage/databases/main/devices.py | 12 ++--- .../storage/databases/main/e2e_room_keys.py | 4 +- .../storage/databases/main/end_to_end_keys.py | 12 ++--- .../databases/main/event_federation.py | 16 +++--- .../databases/main/event_push_actions.py | 4 +- synapse/storage/databases/main/events.py | 16 ++---- .../databases/main/events_bg_updates.py | 28 +++++------ .../storage/databases/main/events_worker.py | 8 +-- .../storage/databases/main/purge_events.py | 4 +- 
synapse/storage/databases/main/receipts.py | 6 +-- .../storage/databases/main/registration.py | 4 +- synapse/storage/databases/main/room.py | 4 +- synapse/storage/databases/main/roommember.py | 24 ++++----- synapse/storage/databases/main/search.py | 4 +- synapse/storage/databases/main/state.py | 2 +- synapse/storage/databases/main/stats.py | 8 ++- synapse/storage/databases/main/stream.py | 6 +-- .../storage/databases/main/user_directory.py | 4 +- synapse/storage/databases/state/store.py | 2 +- synapse/storage/prepare_database.py | 4 +- .../main/delta/56/unique_user_filter_index.py | 4 +- .../main/delta/61/03recreate_min_depth.py | 1 + .../68/05partial_state_rooms_triggers.py | 1 + ..._update_current_state_events_membership.py | 1 + ...p_tables_event_stream_ordering_triggers.py | 1 + .../78/03event_extremities_constraints.py | 1 + synapse/types/__init__.py | 2 +- synapse/types/rest/client/__init__.py | 8 ++- synapse/types/state.py | 18 ++++--- synapse/util/linked_list.py | 3 +- synapse/util/metrics.py | 2 +- synapse/util/patch_inline_callbacks.py | 2 +- synapse/util/ratelimitutils.py | 2 +- synapse/visibility.py | 6 +-- synmark/__main__.py | 2 +- tests/appservice/test_scheduler.py | 3 +- tests/events/test_utils.py | 3 +- tests/federation/test_complexity.py | 4 +- tests/federation/test_federation_catch_up.py | 5 +- tests/federation/test_federation_media.py | 2 - tests/handlers/test_federation_event.py | 16 +++--- tests/handlers/test_presence.py | 8 ++- tests/handlers/test_sync.py | 4 +- tests/http/federation/test_srv_resolver.py | 12 ++--- tests/http/test_client.py | 4 +- tests/http/test_matrixfederationclient.py | 32 ++++++------ tests/http/test_servlet.py | 2 +- tests/media/test_media_storage.py | 2 +- tests/module_api/test_account_data_manager.py | 4 +- tests/push/test_email.py | 1 + tests/rest/admin/test_server_notice.py | 12 ++--- .../sliding_sync/test_connection_tracking.py | 4 +- .../test_extension_account_data.py | 4 +- .../sliding_sync/test_extension_e2ee.py | 4 +- .../sliding_sync/test_extension_receipts.py | 4 +- .../sliding_sync/test_extension_to_device.py | 4 +- .../sliding_sync/test_extension_typing.py | 4 +- .../client/sliding_sync/test_extensions.py | 4 +- .../sliding_sync/test_room_subscriptions.py | 4 +- .../client/sliding_sync/test_rooms_invites.py | 4 +- .../client/sliding_sync/test_rooms_meta.py | 4 +- .../sliding_sync/test_rooms_required_state.py | 4 +- .../sliding_sync/test_rooms_timeline.py | 4 +- .../client/sliding_sync/test_sliding_sync.py | 4 +- tests/rest/client/test_auth_issuer.py | 4 +- tests/rest/client/test_events.py | 2 +- tests/rest/client/test_media.py | 4 +- tests/rest/client/test_profile.py | 1 + tests/rest/client/test_register.py | 4 +- tests/rest/client/utils.py | 43 ++++++++-------- tests/rest/test_well_known.py | 11 +++- tests/test_event_auth.py | 14 +++--- tests/test_federation.py | 4 +- tests/test_types.py | 4 +- tests/test_utils/__init__.py | 1 + tests/unittest.py | 4 +- 152 files changed, 526 insertions(+), 492 deletions(-) create mode 100644 changelog.d/17643.misc diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index 5970b4e826..909b0a847f 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -29,10 +29,14 @@ jobs: with: install-project: "false" - - name: Run ruff + - name: Run ruff check continue-on-error: true run: poetry run ruff check --fix . + - name: Run ruff format + continue-on-error: true + run: poetry run ruff format --quiet . 
+ - run: cargo clippy --all-features --fix -- -D warnings continue-on-error: true diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index add046ec6a..5586bd6d94 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -131,9 +131,12 @@ jobs: with: install-project: "false" - - name: Check style + - name: Run ruff check run: poetry run ruff check --output-format=github . + - name: Run ruff format + run: poetry run ruff format --check . + lint-mypy: runs-on: ubuntu-latest name: Typechecking diff --git a/changelog.d/17643.misc b/changelog.d/17643.misc new file mode 100644 index 0000000000..f583cdcb38 --- /dev/null +++ b/changelog.d/17643.misc @@ -0,0 +1 @@ +Replace `isort` and `black` with `ruff`. diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index d4ddeb4dc7..ca2e72b5e8 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -21,7 +21,8 @@ # # -""" Starts a synapse client console. """ +"""Starts a synapse client console.""" + import argparse import binascii import cmd diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py index 9e67375b6a..26d667aba0 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py @@ -31,6 +31,7 @@ Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. Se until then, this script is a best effort to stop us from introducing type coersion bugs (like the infamous stringy power levels fixed in room version 10). """ + import argparse import contextlib import functools diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index fa6ff90708..c656047729 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -109,6 +109,9 @@ set -x # --quiet suppresses the update check. ruff check --quiet --fix "${files[@]}" +# Reformat Python code. +ruff format --quiet "${files[@]}" + # Catch any common programming mistakes in Rust code. # # --bins, --examples, --lib, --tests combined explicitly disable checking diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 1ace804682..4435624267 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -20,8 +20,7 @@ # # -"""An interactive script for doing a release. See `cli()` below. -""" +"""An interactive script for doing a release. See `cli()` below.""" import glob import json diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index a141218d3d..c9a4114b1e 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Contains *incomplete* type hints for txredisapi. -""" +"""Contains *incomplete* type hints for txredisapi.""" + from typing import Any, List, Optional, Type, Union from twisted.internet import protocol diff --git a/synapse/__init__.py b/synapse/__init__.py index 99ed7a5374..73b92f12be 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -20,8 +20,7 @@ # # -""" This is an implementation of a Matrix homeserver. 
-""" +"""This is an implementation of a Matrix homeserver.""" import os import sys diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py index 715c7ddc17..09feb8cf30 100755 --- a/synapse/_scripts/generate_workers_map.py +++ b/synapse/_scripts/generate_workers_map.py @@ -171,7 +171,7 @@ def elide_http_methods_if_unconflicting( """ def paths_to_methods_dict( - methods_and_paths: Iterable[Tuple[str, str]] + methods_and_paths: Iterable[Tuple[str, str]], ) -> Dict[str, Set[str]]: """ Given (method, path) pairs, produces a dict from path to set of methods @@ -201,7 +201,7 @@ def elide_http_methods_if_unconflicting( def simplify_path_regexes( - registrations: Dict[Tuple[str, str], EndpointDescription] + registrations: Dict[Tuple[str, str], EndpointDescription], ) -> Dict[Tuple[str, str], EndpointDescription]: """ Simplify all the path regexes for the dict of endpoint descriptions, diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py index ad88df477a..62723c539d 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py @@ -40,6 +40,7 @@ from synapse.storage.engines import create_engine class ReviewConfig(RootConfig): "A config class that just pulls out the database config" + config_classes = [DatabaseConfig] @@ -160,7 +161,11 @@ def main() -> None: with make_conn(database_config, engine, "review_recent_signups") as db_conn: # This generates a type of Cursor, not LoggingTransaction. - user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice) # type: ignore[arg-type] + user_infos = get_recent_users( + db_conn.cursor(), + since_ms, # type: ignore[arg-type] + exclude_users_with_appservice, + ) for user_info in user_infos: if exclude_users_with_email and user_info.emails: diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 195c95d376..31639d366e 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -717,9 +717,7 @@ class Porter: return # Check if all background updates are done, abort if not. - updates_complete = ( - await self.sqlite_store.db_pool.updates.has_completed_background_updates() - ) + updates_complete = await self.sqlite_store.db_pool.updates.has_completed_background_updates() if not updates_complete: end_error = ( "Pending background updates exist in the SQLite3 database." 
@@ -1095,10 +1093,10 @@ class Porter: return done, remaining + done async def _setup_state_group_id_seq(self) -> None: - curr_id: Optional[int] = ( - await self.sqlite_store.db_pool.simple_select_one_onecol( - table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True - ) + curr_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True ) if not curr_id: @@ -1186,13 +1184,13 @@ class Porter: ) async def _setup_auth_chain_sequence(self) -> None: - curr_chain_id: Optional[int] = ( - await self.sqlite_store.db_pool.simple_select_one_onecol( - table="event_auth_chains", - keyvalues={}, - retcol="MAX(chain_id)", - allow_none=True, - ) + curr_chain_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, ) def r(txn: LoggingTransaction) -> None: diff --git a/synapse/api/urls.py b/synapse/api/urls.py index d077a2c613..03a3e96f28 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -19,7 +19,8 @@ # # -"""Contains the URL paths to prefix various aspects of the server with. """ +"""Contains the URL paths to prefix various aspects of the server with.""" + import hmac from hashlib import sha256 from urllib.parse import urlencode diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index bec83419a2..7994da0868 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -54,6 +54,7 @@ UP & quit +---------- YES SUCCESS This is all tied together by the AppServiceScheduler which DIs the required components. """ + import logging from typing import ( TYPE_CHECKING, diff --git a/synapse/config/key.py b/synapse/config/key.py index b9925a52d2..bc96888967 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -200,16 +200,13 @@ class KeyConfig(Config): ) form_secret = 'form_secret: "%s"' % random_string_with_symbols(50) - return ( - """\ + return """\ %(macaroon_secret_key)s %(form_secret)s signing_key_path: "%(base_key_name)s.signing.key" trusted_key_servers: - server_name: "matrix.org" - """ - % locals() - ) + """ % locals() def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]: """Read the signing keys in the given path. 
@@ -249,7 +246,9 @@ class KeyConfig(Config): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) - verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes) # type: ignore[assignment] + verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes( + key_id, key_bytes + ) # type: ignore[assignment] verify_key.expired = key_data["expired_ts"] keys[key_id] = verify_key else: diff --git a/synapse/config/logger.py b/synapse/config/logger.py index fca0b08d6d..cfc1a57107 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -157,12 +157,9 @@ class LoggingConfig(Config): self, config_dir_path: str, server_name: str, **kwargs: Any ) -> str: log_config = os.path.join(config_dir_path, server_name + ".log.config") - return ( - """\ + return """\ log_config: "%(log_config)s" - """ - % locals() - ) + """ % locals() def read_arguments(self, args: argparse.Namespace) -> None: if args.no_redirect_stdio is not None: diff --git a/synapse/config/server.py b/synapse/config/server.py index fd52c0475c..488604a30c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -828,13 +828,10 @@ class ServerConfig(Config): ).lstrip() if not unsecure_listeners: - unsecure_http_bindings = ( - """- port: %(unsecure_port)s + unsecure_http_bindings = """- port: %(unsecure_port)s tls: false type: http - x_forwarded: true""" - % locals() - ) + x_forwarded: true""" % locals() if not open_private_ports: unsecure_http_bindings += ( @@ -853,16 +850,13 @@ class ServerConfig(Config): if not secure_listeners: secure_http_bindings = "" - return ( - """\ + return """\ server_name: "%(server_name)s" pid_file: %(pid_file)s listeners: %(secure_http_bindings)s %(unsecure_http_bindings)s - """ - % locals() - ) + """ % locals() def read_arguments(self, args: argparse.Namespace) -> None: if args.manhole is not None: diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 7ecf349e4a..b013ffa354 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -328,10 +328,11 @@ class WorkerConfig(Config): ) # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently - self.instance_map: Dict[ - str, InstanceLocationConfig - ] = parse_and_validate_mapping( - instance_map, InstanceLocationConfig # type: ignore[arg-type] + self.instance_map: Dict[str, InstanceLocationConfig] = ( + parse_and_validate_mapping( + instance_map, + InstanceLocationConfig, # type: ignore[arg-type] + ) ) # Map from type of streams to source, c.f. WriterLocations. 
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index f5abcde2db..b834547d11 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -887,7 +887,8 @@ def _check_power_levels( raise SynapseError(400, f"{v!r} must be an integer.") if k in {"events", "notifications", "users"}: if not isinstance(v, collections.abc.Mapping) or not all( - type(v) is int for v in v.values() # noqa: E721 + type(v) is int + for v in v.values() # noqa: E721 ): raise SynapseError( 400, diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index 9cb053cd8e..9713b141bc 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -80,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: # All methods that the module provides should be async, but this wasn't enforced # in the old module system, so we wrap them if needed def async_wrapper( - f: Optional[Callable[P, R]] + f: Optional[Callable[P, R]], ) -> Optional[Callable[P, Awaitable[R]]]: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 6b70ea94d1..dd21a6136b 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -504,7 +504,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase): def _encode_state_group_delta( - state_group_delta: Dict[Tuple[int, int], StateMap[str]] + state_group_delta: Dict[Tuple[int, int], StateMap[str]], ) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]: if not state_group_delta: return [] @@ -517,7 +517,7 @@ def _encode_state_group_delta( def _decode_state_group_delta( - input: List[Tuple[int, int, List[Tuple[str, str, str]]]] + input: List[Tuple[int, int, List[Tuple[str, str, str]]]], ) -> Dict[Tuple[int, int], StateMap[str]]: if not input: return {} @@ -544,7 +544,7 @@ def _encode_state_dict( def _decode_state_dict( - input: Optional[List[Tuple[str, str, str]]] + input: Optional[List[Tuple[str, str, str]]], ) -> Optional[StateMap[str]]: """Decodes a state dict encoded using `_encode_state_dict` above""" if input is None: diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py index a571eff590..61e28bff66 100644 --- a/synapse/federation/__init__.py +++ b/synapse/federation/__init__.py @@ -19,5 +19,4 @@ # # -""" This package includes all the federation specific logic. -""" +"""This package includes all the federation specific logic.""" diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 0bfde00315..8340b48503 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -20,7 +20,7 @@ # # -""" This module contains all the persistence actions done by the federation +"""This module contains all the persistence actions done by the federation package. These actions are mostly only used by the :py:mod:`.replication` module. 
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 20f87c885e..a05e5d5319 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -859,7 +859,6 @@ class FederationMediaThumbnailServlet(BaseFederationServerServlet): request: SynapseRequest, media_id: str, ) -> None: - width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") diff --git a/synapse/federation/units.py b/synapse/federation/units.py index b2c8ba5887..d8b67a6a5b 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -19,7 +19,7 @@ # # -""" Defines the JSON structure of the protocol units used by the server to +"""Defines the JSON structure of the protocol units used by the server to server protocol. """ diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py index 89e944bc17..37cc3d3ff5 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py @@ -118,10 +118,10 @@ class AccountHandler: } if self._use_account_validity_in_account_status: - status["org.matrix.expired"] = ( - await self._account_validity_handler.is_user_expired( - user_id.to_string() - ) + status[ + "org.matrix.expired" + ] = await self._account_validity_handler.is_user_expired( + user_id.to_string() ) return status diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index b44e862493..c874d22eac 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -197,14 +197,15 @@ class AdminHandler: # events that we have and then filtering, this isn't the most # efficient method perhaps but it does guarantee we get everything. while True: - events, _ = ( - await self._store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_key, - to_key=to_key, - limit=100, - direction=Direction.FORWARDS, - ) + ( + events, + _, + ) = await self._store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_key, + to_key=to_key, + limit=100, + direction=Direction.FORWARDS, ) if not events: break diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a1fab99f6b..1f4264ad7e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -166,8 +166,7 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: if "country" not in identifier or ( # The specification requires a "phone" field, while Synapse used to require a "number" # field. Accept both for backwards compatibility. 
- "phone" not in identifier - and "number" not in identifier + "phone" not in identifier and "number" not in identifier ): raise SynapseError( 400, "Invalid phone-type identifier", errcode=Codes.INVALID_PARAM diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index ad2b0f5fcc..62ce16794f 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -265,9 +265,9 @@ class DirectoryHandler: async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result: Optional[RoomAliasMapping] = ( - await self.get_association_from_room_alias(room_alias) - ) + result: Optional[ + RoomAliasMapping + ] = await self.get_association_from_room_alias(room_alias) if result: room_id = result.room_id @@ -512,11 +512,9 @@ class DirectoryHandler: raise SynapseError(403, "Not allowed to publish room") # Check if publishing is blocked by a third party module - allowed_by_third_party_rules = ( - await ( - self._third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility - ) + allowed_by_third_party_rules = await ( + self._third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility ) ) if not allowed_by_third_party_rules: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 299588e476..2b7aad5b58 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1001,11 +1001,11 @@ class FederationHandler: ) if include_auth_user_id: - event_content[EventContentFields.AUTHORISING_USER] = ( - await self._event_auth_handler.get_user_which_could_invite( - room_id, - state_ids, - ) + event_content[ + EventContentFields.AUTHORISING_USER + ] = await self._event_auth_handler.get_user_which_could_invite( + room_id, + state_ids, ) builder = self.event_builder_factory.for_room_version( diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index cb31d65aa9..89191217d6 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -21,6 +21,7 @@ # """Utilities for interacting with Identity Servers""" + import logging import urllib.parse from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5aa48230ec..204965afee 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1225,10 +1225,9 @@ class EventCreationHandler: ) if prev_event_ids is not None: - assert ( - len(prev_event_ids) <= 10 - ), "Attempting to create an event with %i prev_events" % ( - len(prev_event_ids), + assert len(prev_event_ids) <= 10, ( + "Attempting to create an event with %i prev_events" + % (len(prev_event_ids),) ) else: prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 6fd7afa280..3c44458fa3 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -507,15 +507,16 @@ class PaginationHandler: # Initially fetch the events from the database. With any luck, we can return # these without blocking on backfill (handled below). 
- events, next_key = ( - await self.store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, - ) + ( + events, + next_key, + ) = await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, ) if pagin_config.direction == Direction.BACKWARDS: @@ -584,15 +585,16 @@ class PaginationHandler: # If we did backfill something, refetch the events from the database to # catch anything new that might have been added since we last fetched. if did_backfill: - events, next_key = ( - await self.store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, - ) + ( + events, + next_key, + ) = await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, ) else: # Otherwise, we can backfill in the background for eventual diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 37ee625f71..390cafa8f6 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -71,6 +71,7 @@ user state; this device follows the normal timeout logic (see above) and will automatically be replaced with any information from currently available devices. """ + import abc import contextlib import itertools @@ -493,9 +494,9 @@ class WorkerPresenceHandler(BasePresenceHandler): # The number of ongoing syncs on this process, by (user ID, device ID). # Empty if _presence_enabled is false. - self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = ( - {} - ) + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} self.notifier = hs.get_notifier() self.instance_id = hs.get_instance_id() @@ -818,9 +819,9 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = ( - {} - ) + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} # Keeps track of the number of *ongoing* syncs on other processes. 
# diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index af8cd838ee..ac4544ca4c 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -351,9 +351,9 @@ class ProfileHandler: server_name = host if self._is_mine_server_name(server_name): - media_info: Optional[Union[LocalMedia, RemoteMedia]] = ( - await self.store.get_local_media(media_id) - ) + media_info: Optional[ + Union[LocalMedia, RemoteMedia] + ] = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index efe31e81f9..b1158ee77d 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -188,13 +188,13 @@ class RelationsHandler: if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. - return_value["original_event"] = ( - await self._event_serializer.serialize_event( - event, - now, - bundle_aggregations=None, - config=serialize_options, - ) + return_value[ + "original_event" + ] = await self._event_serializer.serialize_event( + event, + now, + bundle_aggregations=None, + config=serialize_options, ) if next_token: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2c6e672ede..35c88f1b91 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -20,6 +20,7 @@ # """Contains functions for performing actions on rooms.""" + import itertools import logging import math @@ -900,11 +901,9 @@ class RoomCreationHandler: ) # Check whether this visibility value is blocked by a third party module - allowed_by_third_party_rules = ( - await ( - self._third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility - ) + allowed_by_third_party_rules = await ( + self._third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility ) ) if not allowed_by_third_party_rules: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 51b9772329..75c60e3c34 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1302,11 +1302,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # If this is going to be a local join, additional information must # be included in the event content in order to efficiently validate # the event. 
- content[EventContentFields.AUTHORISING_USER] = ( - await self.event_auth_handler.get_user_which_could_invite( - room_id, - state_before_join, - ) + content[ + EventContentFields.AUTHORISING_USER + ] = await self.event_auth_handler.get_user_which_could_invite( + room_id, + state_before_join, ) return False, [] @@ -1415,9 +1415,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if requester is not None: sender = UserID.from_string(event.sender) - assert ( - sender == requester.user - ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user) + assert sender == requester.user, ( + "Sender (%s) must be same as requester (%s)" % (sender, requester.user) + ) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: requester = types.create_requester(target_user) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index a7d52fa648..1a71135d5f 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -423,9 +423,9 @@ class SearchHandler: } if search_result.room_groups and "room_id" in group_keys: - rooms_cat_res.setdefault("groups", {})[ - "room_id" - ] = search_result.room_groups + rooms_cat_res.setdefault("groups", {})["room_id"] = ( + search_result.room_groups + ) if sender_group and "sender" in group_keys: rooms_cat_res.setdefault("groups", {})["sender"] = sender_group diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index d92bdad307..f79796a336 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -587,9 +587,7 @@ class SlidingSyncHandler: Membership.LEAVE, Membership.BAN, ): - to_bound = ( - room_membership_for_user_at_to_token.event_pos.to_room_stream_token() - ) + to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token() timeline_from_bound = from_bound if ignore_timeline_bound: diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index d9f4c56e6e..6f37cc3462 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -386,9 +386,9 @@ class SlidingSyncExtensionHandler: if have_push_rules_changed: global_account_data_map = dict(global_account_data_map) # TODO: This should take into account the `from_token` and `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data_map[ + AccountDataTypes.PUSH_RULES + ] = await self.push_rules_handler.push_rules_for_user(sync_config.user) else: # TODO: This should take into account the `to_token` all_global_account_data = await self.store.get_global_account_data_for_user( @@ -397,9 +397,9 @@ class SlidingSyncExtensionHandler: global_account_data_map = dict(all_global_account_data) # TODO: This should take into account the `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data_map[ + AccountDataTypes.PUSH_RULES + ] = await self.push_rules_handler.push_rules_for_user(sync_config.user) # Fetch room account data account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {} diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 12b7958c6f..1423d6ca53 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -293,10 +293,11 @@ class 
SlidingSyncRoomLists: is_encrypted=is_encrypted, ) - newly_joined_room_ids, newly_left_room_map = ( - await self._get_newly_joined_and_left_rooms( - user_id, from_token=from_token, to_token=to_token - ) + ( + newly_joined_room_ids, + newly_left_room_map, + ) = await self._get_newly_joined_and_left_rooms( + user_id, from_token=from_token, to_token=to_token ) dm_room_ids = await self._get_dm_rooms_for_user(user_id) @@ -958,10 +959,11 @@ class SlidingSyncRoomLists: else: rooms_for_user[room_id] = change_room_for_user - newly_joined_room_ids, newly_left_room_ids = ( - await self._get_newly_joined_and_left_rooms( - user_id, to_token=to_token, from_token=from_token - ) + ( + newly_joined_room_ids, + newly_left_room_ids, + ) = await self._get_newly_joined_and_left_rooms( + user_id, to_token=to_token, from_token=from_token ) dm_room_ids = await self._get_dm_rooms_for_user(user_id) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c44baa7042..609840bfe9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -183,10 +183,7 @@ class JoinedSyncResult: to tell if room needs to be part of the sync result. """ return bool( - self.timeline - or self.state - or self.ephemeral - or self.account_data + self.timeline or self.state or self.ephemeral or self.account_data # nb the notification count does not, er, count: if there's nothing # else in the result, we don't need to send it. ) @@ -575,10 +572,10 @@ class SyncHandler: if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. - result: Union[SyncResult, E2eeSyncResult] = ( - await self.current_sync_for_user( - sync_config, sync_version, since_token, full_state=full_state - ) + result: Union[ + SyncResult, E2eeSyncResult + ] = await self.current_sync_for_user( + sync_config, sync_version, since_token, full_state=full_state ) else: # Otherwise, we wait for something to happen and report it to the user. @@ -673,10 +670,10 @@ class SyncHandler: # Go through the `/sync` v2 path if sync_version == SyncVersion.SYNC_V2: - sync_result: Union[SyncResult, E2eeSyncResult] = ( - await self.generate_sync_result( - sync_config, since_token, full_state - ) + sync_result: Union[ + SyncResult, E2eeSyncResult + ] = await self.generate_sync_result( + sync_config, since_token, full_state ) # Go through the MSC3575 Sliding Sync `/sync/e2ee` path elif sync_version == SyncVersion.E2EE_SYNC: @@ -1488,13 +1485,16 @@ class SyncHandler: # timeline here. The caller will then dedupe any redundant # ones. - state_ids = await self._state_storage_controller.get_state_ids_for_event( - batch.events[0].event_id, - # we only want members! - state_filter=StateFilter.from_types( - (EventTypes.Member, member) for member in members_to_fetch - ), - await_full_state=False, + state_ids = ( + await self._state_storage_controller.get_state_ids_for_event( + batch.events[0].event_id, + # we only want members! 
+ state_filter=StateFilter.from_types( + (EventTypes.Member, member) + for member in members_to_fetch + ), + await_full_state=False, + ) ) return state_ids @@ -2166,18 +2166,18 @@ class SyncHandler: if push_rules_changed: global_account_data = dict(global_account_data) - global_account_data[AccountDataTypes.PUSH_RULES] = ( - await self._push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) global_account_data = dict(all_global_account_data) - global_account_data[AccountDataTypes.PUSH_RULES] = ( - await self._push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 7e578cf462..db998f6701 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -183,7 +183,7 @@ class WorkerLocksHandler: return def _wake_all_locks( - locks: Collection[Union[WaitingLock, WaitingMultiLock]] + locks: Collection[Union[WaitingLock, WaitingMultiLock]], ) -> None: for lock in locks: deferred = lock.deferred diff --git a/synapse/http/client.py b/synapse/http/client.py index cb4f72d771..143fee9796 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1313,6 +1313,5 @@ def is_unknown_endpoint( ) ) or ( # Older Synapses returned a 400 error. - e.code == 400 - and synapse_error.errcode == Codes.UNRECOGNIZED + e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 211795dc39..3e2d94d399 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -233,7 +233,7 @@ def return_html_error( def wrap_async_request_handler( - h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]] + h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]], ) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]: """Wraps an async request handler so that it calls request.processing. diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 6a6afbfc0b..d9ff70b252 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -22,6 +22,7 @@ """ Log formatters that output terse JSON. """ + import json import logging diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 4650b60962..ae2b3d11c0 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -20,7 +20,7 @@ # # -""" Thread-local-alike tracking of log contexts within synapse +"""Thread-local-alike tracking of log contexts within synapse This module provides objects and utilities for tracking contexts through synapse code, so that log lines can include a request identifier, and so that @@ -29,6 +29,7 @@ them. See doc/log_contexts.rst for details on how this works. 
""" + import logging import threading import typing @@ -751,7 +752,7 @@ def preserve_fn( f: Union[ Callable[P, R], Callable[P, Awaitable[R]], - ] + ], ) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index e32b3f6781..d976e58e49 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -169,6 +169,7 @@ Gotchas than one caller? Will all of those calling functions have be in a context with an active span? """ + import contextlib import enum import inspect @@ -414,7 +415,7 @@ def ensure_active_span( """ def ensure_active_span_inner_1( - func: Callable[P, R] + func: Callable[P, R], ) -> Callable[P, Union[Optional[T], R]]: @wraps(func) def ensure_active_span_inner_2( @@ -700,7 +701,7 @@ def set_operation_name(operation_name: str) -> None: @only_if_tracing def force_tracing( - span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel + span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel, ) -> None: """Force sampling for the active/given span and its children. @@ -1093,9 +1094,10 @@ def trace_servlet( # Mypy seems to think that start_context.tag below can be Optional[str], but # that doesn't appear to be correct and works in practice. - request_tags[ - SynapseTags.REQUEST_TAG - ] = request.request_metrics.start_context.tag # type: ignore[assignment] + + request_tags[SynapseTags.REQUEST_TAG] = ( + request.request_metrics.start_context.tag # type: ignore[assignment] + ) # set the tags *after* the servlet completes, in case it decided to # prioritise the span (tags will get dropped on unprioritised spans) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 19c92b02a0..49d0ff9fc1 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -293,7 +293,7 @@ def wrap_as_background_process( """ def wrap_as_background_process_inner( - func: Callable[P, Awaitable[Optional[R]]] + func: Callable[P, Awaitable[Optional[R]]], ) -> Callable[P, "defer.Deferred[Optional[R]]"]: @wraps(func) def wrap_as_background_process_inner_2( diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 34ab637c3d..679cbe9afa 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -304,9 +304,9 @@ class BulkPushRuleEvaluator: if relation_type == "m.thread" and event.content.get( "m.relates_to", {} ).get("is_falling_back", False): - related_events["m.in_reply_to"][ - "im.vector.is_falling_back" - ] = "" + related_events["m.in_reply_to"]["im.vector.is_falling_back"] = ( + "" + ) return related_events @@ -372,7 +372,8 @@ class BulkPushRuleEvaluator: gather_results( ( run_in_background( # type: ignore[call-arg] - self.store.get_number_joined_users_in_room, event.room_id # type: ignore[arg-type] + self.store.get_number_joined_users_in_room, + event.room_id, # type: ignore[arg-type] ), run_in_background( self._get_power_levels_and_sender_level, diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 9c537427df..940f418396 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -119,7 +119,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): return payload - async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, 
JsonDict]: # type: ignore[override] + async def _handle_request( # type: ignore[override] + self, request: Request, content: JsonDict + ) -> Tuple[int, JsonDict]: with Measure(self.clock, "repl_fed_send_events_parse"): room_id = content["room_id"] backfilled = content["backfilled"] diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index de07e75b46..2e06c43ce5 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -98,7 +98,9 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint): self._store = hs.get_datastores().main @staticmethod - async def _serialize_payload(user_id: str, old_room_id: str, new_room_id: str) -> JsonDict: # type: ignore[override] + async def _serialize_payload( # type: ignore[override] + user_id: str, old_room_id: str, new_room_id: str + ) -> JsonDict: return {} async def _handle_request( # type: ignore[override] @@ -109,7 +111,6 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint): old_room_id: str, new_room_id: str, ) -> Tuple[int, JsonDict]: - await self._store.copy_push_rules_from_room_to_room_for_user( old_room_id, new_room_id, user_id ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 3dddbb70b4..0bd5478cd3 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -18,8 +18,8 @@ # [This file includes modifications made by New Vector Limited] # # -"""A replication client for use by synapse workers. -""" +"""A replication client for use by synapse workers.""" + import logging from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index b7a7e77597..7d51441e91 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -23,6 +23,7 @@ The VALID_SERVER_COMMANDS and VALID_CLIENT_COMMANDS define which commands are allowed to be sent by which side. """ + import abc import logging from typing import List, Optional, Tuple, Type, TypeVar diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 72a42cb6cc..6101226938 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -857,7 +857,7 @@ UpdateRow = TypeVar("UpdateRow") def _batch_updates( - updates: Iterable[Tuple[UpdateToken, UpdateRow]] + updates: Iterable[Tuple[UpdateToken, UpdateRow]], ) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]: """Collect stream updates with the same token together diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 4471cc8f0c..fb9c539122 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -23,6 +23,7 @@ protocols. An explanation of this protocol is available in docs/tcp_replication.md """ + import fcntl import logging import struct diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index c0329378ac..d647a2b332 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -18,8 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -"""The server side of the replication stream. 
-""" +"""The server side of the replication stream.""" import logging import random @@ -307,7 +306,7 @@ class ReplicationStreamer: def _batch_updates( - updates: List[Tuple[Token, StreamRow]] + updates: List[Tuple[Token, StreamRow]], ) -> List[Tuple[Optional[Token], StreamRow]]: """Takes a list of updates of form [(token, row)] and sets the token to None for all rows where the next row has the same token. This is used to diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index d021904de7..ebf5964d29 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -247,7 +247,7 @@ class _StreamFromIdGen(Stream): def current_token_without_instance( - current_token: Callable[[], int] + current_token: Callable[[], int], ) -> Callable[[str], int]: """Takes a current token callback function for a single writer stream that doesn't take an instance name parameter and wraps it in a function that diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index 0867f7a51c..bec2331590 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -181,8 +181,7 @@ class NewRegistrationTokenRestServlet(RestServlet): uses_allowed = body.get("uses_allowed", None) if not ( - uses_allowed is None - or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 + uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 ): raise SynapseError( HTTPStatus.BAD_REQUEST, diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py index 93dec6375a..6cf37869d8 100644 --- a/synapse/rest/client/_base.py +++ b/synapse/rest/client/_base.py @@ -19,8 +19,8 @@ # # -"""This module contains base REST classes for constructing client v1 servlets. -""" +"""This module contains base REST classes for constructing client v1 servlets.""" + import logging import re from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 0ee24081fa..734c9e992f 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -108,9 +108,9 @@ class AccountDataServlet(RestServlet): # Push rules are stored in a separate table and must be queried separately. 
if account_data_type == AccountDataTypes.PUSH_RULES: - account_data: Optional[JsonMapping] = ( - await self._push_rules_handler.push_rules_for_user(requester.user) - ) + account_data: Optional[ + JsonMapping + ] = await self._push_rules_handler.push_rules_for_user(requester.user) else: account_data = await self.store.get_global_account_data_by_type_for_user( user_id, account_data_type diff --git a/synapse/rest/client/account_validity.py b/synapse/rest/client/account_validity.py index 6222a5cc37..ec7836b647 100644 --- a/synapse/rest/client/account_validity.py +++ b/synapse/rest/client/account_validity.py @@ -48,9 +48,7 @@ class AccountValidityRenewServlet(RestServlet): self.account_renewed_template = ( hs.config.account_validity.account_validity_account_renewed_template ) - self.account_previously_renewed_template = ( - hs.config.account_validity.account_validity_account_previously_renewed_template - ) + self.account_previously_renewed_template = hs.config.account_validity.account_validity_account_previously_renewed_template self.invalid_token_template = ( hs.config.account_validity.account_validity_invalid_token_template ) diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 613890061e..ad23cc76ce 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -20,6 +20,7 @@ # """This module contains REST servlets to do with event streaming, /events.""" + import logging from typing import TYPE_CHECKING, Dict, List, Tuple, Union diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py index 572e92642c..ecc52956e4 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py @@ -19,8 +19,8 @@ # # -""" This module contains REST servlets to do with presence: /presence/ -""" +"""This module contains REST servlets to do with presence: /presence/""" + import logging from typing import TYPE_CHECKING, Tuple diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py index c1a80c5c3d..7a95b9445d 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -19,7 +19,7 @@ # # -""" This module contains REST servlets to do with profile: /profile/ """ +"""This module contains REST servlets to do with profile: /profile/""" from http import HTTPStatus from typing import TYPE_CHECKING, Tuple diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 5dddbc69be..61e1436841 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -640,12 +640,10 @@ class RegisterRestServlet(RestServlet): if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = ( - await ( - self.password_auth_provider.get_username_for_registration( - auth_result, - params, - ) + desired_username = await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, ) ) @@ -696,11 +694,9 @@ class RegisterRestServlet(RestServlet): session_id ) - display_name = ( - await ( - self.password_auth_provider.get_displayname_for_registration( - auth_result, params - ) + display_name = await ( + self.password_auth_provider.get_displayname_for_registration( + auth_result, params ) ) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 7d57904d69..83f84e4998 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -19,7 +19,8 @@ # # -""" This module contains REST servlets to do with rooms: /rooms/ """ +"""This module contains REST servlets to do 
with rooms: /rooms/""" + import logging import re from enum import Enum diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 22c85e497a..cc9fbfe546 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1045,9 +1045,9 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["initial"] = room_result.initial if room_result.unstable_expanded_timeline: - serialized_rooms[room_id][ - "unstable_expanded_timeline" - ] = room_result.unstable_expanded_timeline + serialized_rooms[room_id]["unstable_expanded_timeline"] = ( + room_result.unstable_expanded_timeline + ) # This will be omitted for invite/knock rooms with `stripped_state` if ( @@ -1082,9 +1082,9 @@ class SlidingSyncRestServlet(RestServlet): # This will be omitted for invite/knock rooms with `stripped_state` if room_result.prev_batch is not None: - serialized_rooms[room_id]["prev_batch"] = ( - await room_result.prev_batch.to_string(self.store) - ) + serialized_rooms[room_id][ + "prev_batch" + ] = await room_result.prev_batch.to_string(self.store) # This will be omitted for invite/knock rooms with `stripped_state` if room_result.num_live is not None: diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 30c1f17fc6..f791904168 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -21,6 +21,7 @@ """This module contains logic for storing HTTP PUT transactions. This is used to ensure idempotency when performing PUTs using the REST API.""" + import logging from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 1975ebb477..3c2028a2ad 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -191,10 +191,10 @@ class RemoteKey(RestServlet): server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {} for server_name, key_ids in query.items(): if key_ids: - results: Mapping[str, Optional[FetchKeyResultForRemote]] = ( - await self.store.get_server_keys_json_for_remote( - server_name, key_ids - ) + results: Mapping[ + str, Optional[FetchKeyResultForRemote] + ] = await self.store.get_server_keys_json_for_remote( + server_name, key_ids ) else: results = await self.store.get_all_server_keys_json_for_remote( diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index 989e570671..d336d60c93 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -65,9 +65,9 @@ class WellKnownBuilder: } account_management_url = await auth.account_management_url() if account_management_url is not None: - result["org.matrix.msc2965.authentication"][ - "account" - ] = account_management_url + result["org.matrix.msc2965.authentication"]["account"] = ( + account_management_url + ) if self._config.server.extra_well_known_client_content: for ( diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index f6ea90bd4f..e88e8c9b45 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -119,7 +119,9 @@ class ResourceLimitsServerNotices: elif not currently_blocked and limit_msg: # Room is not notifying of a block, when it ought to be. 
await self._apply_limit_block_notification( - user_id, limit_msg, limit_type # type: ignore + user_id, + limit_msg, + limit_type, # type: ignore ) except SynapseError as e: logger.error("Error sending resource limits server notice: %s", e) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index ac0919340b..879ee9039e 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -416,7 +416,7 @@ class EventsPersistenceStorageController: set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) async def enqueue( - item: Tuple[str, List[Tuple[EventBase, EventContext]]] + item: Tuple[str, List[Tuple[EventBase, EventContext]]], ) -> Dict[str, str]: room_id, evs_ctxs = item return await self._event_persist_queue.add_to_queue( @@ -792,9 +792,9 @@ class EventsPersistenceStorageController: ) # Remove any events which are prev_events of any existing events. - existing_prevs: Collection[str] = ( - await self.persist_events_store._get_events_which_are_prevs(result) - ) + existing_prevs: Collection[ + str + ] = await self.persist_events_store._get_events_which_are_prevs(result) result.difference_update(existing_prevs) # Finally handle the case where the new events have soft-failed prev diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 4b66247640..bf6cfcbfd9 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -238,9 +238,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): INNER JOIN user_ips USING (user_id, access_token, ip) GROUP BY user_id, access_token, ip HAVING count(*) > 1 - """.format( - clause - ), + """.format(clause), args, ) res = cast( @@ -373,9 +371,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): LIMIT ? 
) c INNER JOIN user_ips AS u USING (user_id, device_id, last_seen) - """ % { - "where_clause": where_clause - } + """ % {"where_clause": where_clause} txn.execute(sql, where_args + [batch_size]) rows = cast(List[Tuple[int, str, str, str, str]], txn.fetchall()) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 042d595ea0..0612b82b9b 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -1116,7 +1116,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): txn.execute(sql, (start, stop)) - destinations = {d for d, in txn} + destinations = {d for (d,) in txn} to_remove = set() for d in destinations: try: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 53024bddc3..a83df4075a 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -670,9 +670,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): result["keys"] = keys device_display_name = None - if ( - self.hs.config.federation.allow_device_name_lookup_over_federation - ): + if self.hs.config.federation.allow_device_name_lookup_over_federation: device_display_name = device.display_name if device_display_name: result["device_display_name"] = device_display_name @@ -917,7 +915,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): from_key, to_key, ) - return {u for u, in rows} + return {u for (u,) in rows} @cancellable async def get_users_whose_devices_changed( @@ -968,7 +966,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn.database_engine, "user_id", chunk ) txn.execute(sql % (clause,), [from_key, to_key] + args) - changes.update(user_id for user_id, in txn) + changes.update(user_id for (user_id,) in txn) return changes @@ -1520,7 +1518,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): args: List[Any], ) -> Set[str]: txn.execute(sql.format(clause=clause), args) - return {user_id for user_id, in txn} + return {user_id for (user_id,) in txn} changes = set() for chunk in batch_iter(changed_room_ids, 1000): @@ -1560,7 +1558,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn: LoggingTransaction, ) -> Set[str]: txn.execute(sql, (from_id, to_id)) - return {room_id for room_id, in txn} + return {room_id for (room_id,) in txn} return await self.db_pool.runInteraction( "get_all_device_list_changes", diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 4d6a921ab2..c2c93e12d9 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -387,9 +387,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): is_verified, session_data FROM e2e_room_keys WHERE user_id = ? AND version = ? 
AND (%s) - """ % ( - " OR ".join(where_clauses) - ) + """ % (" OR ".join(where_clauses)) txn.execute(sql, params) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 9e6c9561ae..575aaf498b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -472,9 +472,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker signature_sql = """ SELECT user_id, key_id, target_device_id, signature FROM e2e_cross_signing_signatures WHERE %s - """ % ( - " OR ".join("(" + q + ")" for q in signature_query_clauses) - ) + """ % (" OR ".join("(" + q + ")" for q in signature_query_clauses)) txn.execute(signature_sql, signature_query_params) return cast( @@ -917,9 +915,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker FROM e2e_cross_signing_keys WHERE %(clause)s ORDER BY user_id, keytype, stream_id DESC - """ % { - "clause": clause - } + """ % {"clause": clause} else: # SQLite has special handling for bare columns when using # MIN/MAX with a `GROUP BY` clause where it picks the value from @@ -929,9 +925,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker FROM e2e_cross_signing_keys WHERE %(clause)s GROUP BY user_id, keytype - """ % { - "clause": clause - } + """ % {"clause": clause} txn.execute(sql, params) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 715846865b..46aa5902d8 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -326,7 +326,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ rows = txn.execute_values(sql, chains.items()) - results.update(r for r, in rows) + results.update(r for (r,) in rows) else: # For SQLite we just fall back to doing a noddy for loop. sql = """ @@ -335,7 +335,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ for chain_id, max_no in chains.items(): txn.execute(sql, (chain_id, max_no)) - results.update(r for r, in txn) + results.update(r for (r,) in txn) return results @@ -645,7 +645,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas ] rows = txn.execute_values(sql, args) - result.update(r for r, in rows) + result.update(r for (r,) in rows) else: # For SQLite we just fall back to doing a noddy for loop. sql = """ @@ -654,7 +654,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ for chain_id, (min_no, max_no) in chain_to_gap.items(): txn.execute(sql, (chain_id, min_no, max_no)) - result.update(r for r, in txn) + result.update(r for (r,) in txn) return result @@ -1220,13 +1220,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas HAVING count(*) > ? ORDER BY count(*) DESC LIMIT ? 
- """ % ( - where_clause, - ) + """ % (where_clause,) query_args = list(itertools.chain(room_id_filter, [min_count, limit])) txn.execute(sql, query_args) - return [room_id for room_id, in txn] + return [room_id for (room_id,) in txn] return await self.db_pool.runInteraction( "get_rooms_with_many_extremities", _get_rooms_with_many_extremities_txn @@ -1358,7 +1356,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]: txn.execute(sql, (stream_ordering, room_id)) - return [event_id for event_id, in txn] + return [event_id for (event_id,) in txn] event_ids = await self.db_pool.runInteraction( "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 0ebf5b53d5..f42023418e 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1860,9 +1860,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND epa.notif = 1 ORDER BY epa.stream_ordering DESC LIMIT ? - """ % ( - before_clause, - ) + """ % (before_clause,) txn.execute(sql, args) return cast( List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall() diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index e44b8d8e54..d423d80efa 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -429,9 +429,7 @@ class PersistEventsStore: if event_type == EventTypes.Member and self.is_mine_id(state_key) ] - membership_snapshot_shared_insert_values: ( - SlidingSyncMembershipSnapshotSharedInsertValues - ) = {} + membership_snapshot_shared_insert_values: SlidingSyncMembershipSnapshotSharedInsertValues = {} membership_infos_to_insert_membership_snapshots: List[ SlidingSyncMembershipInfo ] = [] @@ -719,7 +717,7 @@ class PersistEventsStore: keyvalues={}, retcols=("event_id",), ) - already_persisted_events = {event_id for event_id, in rows} + already_persisted_events = {event_id for (event_id,) in rows} state_events = [ event for event in state_events @@ -1830,12 +1828,8 @@ class PersistEventsStore: if sliding_sync_table_changes.to_insert_membership_snapshots: # Update the `sliding_sync_membership_snapshots` table # - sliding_sync_snapshot_keys = ( - sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() - ) - sliding_sync_snapshot_values = ( - sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() - ) + sliding_sync_snapshot_keys = sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() + sliding_sync_snapshot_values = sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() # We need to insert/update regardless of whether we have # `sliding_sync_snapshot_keys` because there are other fields in the `ON # CONFLICT` upsert to run (see inherit case (explained in @@ -3361,7 +3355,7 @@ class PersistEventsStore: ) potential_backwards_extremities.difference_update( - e for e, in existing_events_outliers + e for (e,) in existing_events_outliers ) if potential_backwards_extremities: diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b227e05773..4209100a5c 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ 
b/synapse/storage/databases/main/events_bg_updates.py @@ -647,7 +647,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS room_ids = {row[0] for row in rows} for room_id in room_ids: txn.call_after( - self.get_latest_event_ids_in_room.invalidate, (room_id,) # type: ignore[attr-defined] + self.get_latest_event_ids_in_room.invalidate, # type: ignore[attr-defined] + (room_id,), ) self.db_pool.simple_delete_many_txn( @@ -2065,9 +2066,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) # Map of values to insert/update in the `sliding_sync_membership_snapshots` table - sliding_sync_membership_snapshots_insert_map: ( - SlidingSyncMembershipSnapshotSharedInsertValues - ) = {} + sliding_sync_membership_snapshots_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {} if membership == Membership.JOIN: # If we're still joined, we can pull from current state. current_state_ids_map: StateMap[ @@ -2149,14 +2148,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # membership (i.e. the room shouldn't disappear if you're using the # `is_encrypted` filter and you leave). if membership in (Membership.LEAVE, Membership.BAN) and is_outlier: - invite_or_knock_event_id, invite_or_knock_membership = ( - await self.db_pool.runInteraction( - "sliding_sync_membership_snapshots_bg_update._find_previous_membership", - _find_previous_membership_txn, - room_id, - user_id, - membership_event_id, - ) + ( + invite_or_knock_event_id, + invite_or_knock_membership, + ) = await self.db_pool.runInteraction( + "sliding_sync_membership_snapshots_bg_update._find_previous_membership", + _find_previous_membership_txn, + room_id, + user_id, + membership_event_id, ) # Pull from the stripped state on the invite/knock event @@ -2484,9 +2484,7 @@ def _resolve_stale_data_in_sliding_sync_joined_rooms_table( "progress_json": "{}", }, ) - depends_on = ( - _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE - ) + depends_on = _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE # Now kick off the background update to catch up with what we missed while Synapse # was downgraded. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 1d83390827..b188f32927 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1665,7 +1665,7 @@ class EventsWorkerStore(SQLBaseStore): txn.database_engine, "e.event_id", event_ids ) txn.execute(sql + clause, args) - found_events = {eid for eid, in txn} + found_events = {eid for (eid,) in txn} # ... and then we can update the results for each key return {eid: (eid in found_events) for eid in event_ids} @@ -1864,9 +1864,9 @@ class EventsWorkerStore(SQLBaseStore): " LIMIT ?" ) txn.execute(sql, (-last_id, -current_id, instance_name, limit)) - new_event_updates: List[Tuple[int, Tuple[str, str, str, str, str, str]]] = ( - [] - ) + new_event_updates: List[ + Tuple[int, Tuple[str, str, str, str, str, str]] + ] = [] row: Tuple[int, str, str, str, str, str, str] # Type safety: iterating over `txn` yields `Tuple`, i.e. # `Tuple[Any, ...]` of arbitrary length.
Mypy detects assigning a diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index fc4c286595..08244153a3 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -201,7 +201,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): txn.execute_batch( "INSERT INTO event_backward_extremities (room_id, event_id)" " VALUES (?, ?)", - [(room_id, event_id) for event_id, in new_backwards_extrems], + [(room_id, event_id) for (event_id,) in new_backwards_extrems], ) logger.info("[purge] finding state groups referenced by deleted events") @@ -215,7 +215,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): """ ) - referenced_state_groups = {sg for sg, in txn} + referenced_state_groups = {sg for (sg,) in txn} logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups) ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index bf10743574..9964331510 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -762,7 +762,7 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, args) - return [room_id for room_id, in txn] + return [room_id for (room_id,) in txn] results: List[str] = [] for batch in batch_iter(room_ids, 1000): @@ -1030,9 +1030,7 @@ class ReceiptsWorkerStore(SQLBaseStore): SELECT event_id WHERE room_id = ? AND stream_ordering IN ( SELECT max(stream_ordering) WHERE %s ) - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, [room_id] + list(args)) rows = txn.fetchall() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index df7f8a43b7..d7cbe33411 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1250,9 +1250,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): SELECT address, session_id, medium, client_secret, last_send_attempt, validated_at FROM threepid_validation_session WHERE %s - """ % ( - " AND ".join("%s = ?" % k for k in keyvalues.keys()), - ) + """ % (" AND ".join("%s = ?" % k for k in keyvalues.keys()),) if validated is not None: sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 80a4bf95f2..68b0806041 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1608,9 +1608,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): FROM event_reports AS er JOIN room_stats_state ON room_stats_state.room_id = er.room_id {} - """.format( - where_clause - ) + """.format(where_clause) txn.execute(sql, args) count = cast(Tuple[int], txn.fetchone())[0] diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 57b9b95c28..3d834b4bf1 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -232,9 +232,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): AND m.room_id = c.room_id AND m.user_id = c.state_key WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? 
AND %s - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, (room_id, Membership.JOIN, *ids)) return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn} @@ -531,9 +529,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): WHERE user_id = ? AND %s - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, (user_id, *args)) results = [ @@ -813,7 +809,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ txn.execute(sql, (user_id, *args)) - return {u: True for u, in txn} + return {u: True for (u,) in txn} to_return = {} for batch_user_ids in batch_iter(other_user_ids, 1000): @@ -1031,7 +1027,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): AND room_id = ? """ txn.execute(sql, (room_id,)) - return {d for d, in txn} + return {d for (d,) in txn} return await self.db_pool.runInteraction( "get_current_hosts_in_room", get_current_hosts_in_room_txn @@ -1099,7 +1095,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ txn.execute(sql, (room_id,)) # `server_domain` will be `NULL` for malformed MXIDs with no colons. - return tuple(d for d, in txn if d is not None) + return tuple(d for (d,) in txn if d is not None) return await self.db_pool.runInteraction( "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn @@ -1316,9 +1312,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): room_id = ? AND membership = ? AND NOT (%s) LIMIT 1 - """ % ( - clause, - ) + """ % (clause,) def _is_local_host_in_room_ignoring_users_txn( txn: LoggingTransaction, @@ -1464,10 +1458,12 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): self, progress: JsonDict, batch_size: int ) -> int: target_min_stream_id = progress.get( - "target_min_stream_id_inclusive", self._min_stream_order_on_start # type: ignore[attr-defined] + "target_min_stream_id_inclusive", + self._min_stream_order_on_start, # type: ignore[attr-defined] ) max_stream_id = progress.get( - "max_stream_id_exclusive", self._stream_order_on_start + 1 # type: ignore[attr-defined] + "max_stream_id_exclusive", + self._stream_order_on_start + 1, # type: ignore[attr-defined] ) def add_membership_profile_txn(txn: LoggingTransaction) -> int: diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 20fcfd3122..b436275f3f 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -177,9 +177,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): AND (%s) ORDER BY stream_ordering DESC LIMIT ? - """ % ( - " OR ".join("type = '%s'" % (t,) for t in TYPES), - ) + """ % (" OR ".join("type = '%s'" % (t,) for t in TYPES),) txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 62bc4600fb..c5caaf56b0 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -535,7 +535,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): desc="check_if_events_in_current_state", ) - return frozenset(event_id for event_id, in rows) + return frozenset(event_id for (event_id,) in rows) # FIXME: how should this be cached? 
@cancellable diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index e9f6a918c7..79c49e7fd9 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -161,7 +161,7 @@ class StatsStore(StateDeltasStore): LIMIT ? """ txn.execute(sql, (last_user_id, batch_size)) - return [r for r, in txn] + return [r for (r,) in txn] users_to_work_on = await self.db_pool.runInteraction( "_populate_stats_process_users", _get_next_batch @@ -207,7 +207,7 @@ class StatsStore(StateDeltasStore): LIMIT ? """ txn.execute(sql, (last_room_id, batch_size)) - return [r for r, in txn] + return [r for (r,) in txn] rooms_to_work_on = await self.db_pool.runInteraction( "populate_stats_rooms_get_batch", _get_next_batch @@ -751,9 +751,7 @@ class StatsStore(StateDeltasStore): LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id {} GROUP BY lmr.user_id, displayname - """.format( - where_clause - ) + """.format(where_clause) # SQLite does not support SELECT COUNT(*) OVER() sql = """ diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 1a59e0b5a8..68d4168621 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -21,7 +21,7 @@ # # -""" This module is responsible for getting events from the DB for pagination +"""This module is responsible for getting events from the DB for pagination and event streaming. The order it returns events in depend on whether we are streaming forwards or @@ -1122,9 +1122,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): AND e.stream_ordering > ? AND e.stream_ordering <= ? %s ORDER BY e.stream_ordering ASC - """ % ( - ignore_room_clause, - ) + """ % (ignore_room_clause,) txn.execute(sql, args) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 6e18f714d7..51cffb0986 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -224,9 +224,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): SELECT room_id, events FROM %s ORDER BY events DESC LIMIT 250 - """ % ( - TEMP_TABLE + "_rooms", - ) + """ % (TEMP_TABLE + "_rooms",) txn.execute(sql) rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall()) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index d4ac74c1ee..aea71b8fcc 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -767,7 +767,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): remaining_state_groups = { state_group - for state_group, in rows + for (state_group,) in rows if state_group not in state_groups_to_delete } diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index aaffe5ecc9..bf087702ea 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -607,7 +607,7 @@ def _apply_module_schema_files( "SELECT file FROM applied_module_schemas WHERE module_name = ?", (modname,), ) - applied_deltas = {d for d, in cur} + applied_deltas = {d for (d,) in cur} for name, stream in names_and_streams: if name in applied_deltas: continue @@ -710,7 +710,7 @@ def _get_or_create_schema_state( "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,), ) - applied_deltas = tuple(d for d, in txn) + applied_deltas = tuple(d for (d,) in txn) return _SchemaState( 
current_version=current_version, diff --git a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py index 2461f87d77..b7535dae14 100644 --- a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py +++ b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py @@ -41,8 +41,6 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> (user_id, filter_id); DROP TABLE user_filters; ALTER TABLE user_filters_migration RENAME TO user_filters; - """ % ( - select_clause, - ) + """ % (select_clause,) execute_statements_from_stream(cur, StringIO(sql)) diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py index 5d3578eaf4..a847ef4147 100644 --- a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py +++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py @@ -23,6 +23,7 @@ This migration handles the process of changing the type of `room_depth.min_depth` to a BIGINT. """ + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py index b4d4b6536b..9ac3d1d31f 100644 --- a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py @@ -25,6 +25,7 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu Triggers cannot be expressed in .sql files, so we have to use a separate file. """ + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py index 93543fca7c..be80a6747d 100644 --- a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py @@ -26,6 +26,7 @@ for its completion can be removed. Note the background job must still remain defined in the database class. """ + from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine diff --git a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py index 6609ef0dac..a847a93494 100644 --- a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py +++ b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py @@ -24,6 +24,7 @@ This migration adds triggers to the room membership tables to enforce consistency. Triggers cannot be expressed in .sql files, so we have to use a separate file. 
""" + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py index ad9c394162..1c823a3aa1 100644 --- a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py @@ -23,6 +23,7 @@ """ This migration adds foreign key constraint to `event_forward_extremities` table. """ + from synapse.storage.background_updates import ( ForeignKeyConstraint, run_validate_constraint_and_delete_rows_schema_delta, diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 5259550f1c..26783c5622 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -1308,7 +1308,7 @@ class DeviceListUpdates: def get_verify_key_from_cross_signing_key( - key_info: Mapping[str, Any] + key_info: Mapping[str, Any], ) -> Tuple[str, VerifyKey]: """Get the key ID and signedjson verify key from a cross-signing key dict diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 93b537ab7b..9f6fb087c1 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -268,7 +268,9 @@ class SlidingSyncBody(RequestBodyModel): if TYPE_CHECKING: ranges: Optional[List[Tuple[int, int]]] = None else: - ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type] + ranges: Optional[ + List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]] + ] = None # type: ignore[valid-type] slow_get_all_rooms: Optional[StrictBool] = False filters: Optional[Filters] = None @@ -388,7 +390,9 @@ class SlidingSyncBody(RequestBodyModel): if TYPE_CHECKING: lists: Optional[Dict[str, SlidingSyncList]] = None else: - lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type] + lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = ( + None # type: ignore[valid-type] + ) room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None extensions: Optional[Extensions] = None diff --git a/synapse/types/state.py b/synapse/types/state.py index c958a95701..1141c4b5c1 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -503,13 +503,19 @@ class StateFilter: # - if so, which event types are excluded? ('excludes') # - which entire event types to include ('wildcards') # - which concrete state keys to include ('concrete state keys') - (self_all, self_excludes), ( - self_wildcards, - self_concrete_keys, + ( + (self_all, self_excludes), + ( + self_wildcards, + self_concrete_keys, + ), ) = self._decompose_into_four_parts() - (other_all, other_excludes), ( - other_wildcards, - other_concrete_keys, + ( + (other_all, other_excludes), + ( + other_wildcards, + other_concrete_keys, + ), ) = other._decompose_into_four_parts() # Start with an estimate of the difference based on self diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py index e9a5fff211..87f801c0cf 100644 --- a/synapse/util/linked_list.py +++ b/synapse/util/linked_list.py @@ -19,8 +19,7 @@ # # -"""A circular doubly linked list implementation. 
-""" +"""A circular doubly linked list implementation.""" import threading from typing import Generic, Optional, Type, TypeVar diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 517e79ce5f..020618598c 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -110,7 +110,7 @@ def measure_func( """ def wrapper( - func: Callable[Concatenate[HasClock, P], Awaitable[R]] + func: Callable[Concatenate[HasClock, P], Awaitable[R]], ) -> Callable[P, Awaitable[R]]: block_name = func.__name__ if name is None else name diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index 46dad32156..56bdf451da 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -50,7 +50,7 @@ def do_patch() -> None: return def new_inline_callbacks( - f: Callable[P, Generator["Deferred[object]", object, T]] + f: Callable[P, Generator["Deferred[object]", object, T]], ) -> Callable[P, "Deferred[T]"]: @functools.wraps(f) def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]": diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 8ead72bb7a..3f067b792c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -103,7 +103,7 @@ _rate_limiter_instances_lock = threading.Lock() def _get_counts_from_rate_limiter_instance( - count_func: Callable[["FederationRateLimiter"], int] + count_func: Callable[["FederationRateLimiter"], int], ) -> Mapping[Tuple[str, ...], int]: """Returns a count of something (slept/rejected hosts) by (metrics_name)""" # Cast to a list to prevent it changing while the Prometheus diff --git a/synapse/visibility.py b/synapse/visibility.py index 128413c8aa..3a2782bade 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -135,9 +135,9 @@ async def filter_events_for_client( retention_policies: Dict[str, RetentionPolicy] = {} for room_id in room_ids: - retention_policies[room_id] = ( - await storage.main.get_retention_policy_for_room(room_id) - ) + retention_policies[ + room_id + ] = await storage.main.get_retention_policy_for_room(room_id) def allowed(event: EventBase) -> Optional[EventBase]: state_after_event = event_id_to_state.get(event.event_id) diff --git a/synmark/__main__.py b/synmark/__main__.py index cac57cf111..746261a1ec 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -40,7 +40,7 @@ T = TypeVar("T") def make_test( - main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]] + main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]], ) -> Callable[[int], float]: """ Take a benchmark function and wrap it in a reactor start and stop. 
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index a1c7ccdd0b..730b00a9fb 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -150,7 +150,8 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): self.assertEqual(1, len(self.txnctrl.recoverers)) # and stored self.assertEqual(0, txn.complete.call_count) # txn not completed self.store.set_appservice_state.assert_called_once_with( - service, ApplicationServiceState.DOWN # service marked as down + service, + ApplicationServiceState.DOWN, # service marked as down ) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 30f8787758..654e6521a2 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -756,7 +756,8 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): def test_event_fields_fail_if_fields_not_str(self) -> None: with self.assertRaises(TypeError): self.serialize( - MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4] # type: ignore[list-item] + MockEvent(room_id="!foo:bar", content={"foo": "bar"}), + ["room_id", 4], # type: ignore[list-item] ) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 9bd97e5d4e..87b9ffc0c6 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -158,7 +158,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): async def get_current_state_event_counts(room_id: str) -> int: return 600 - self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[method-assign] + self.hs.get_datastores().main.get_current_state_event_counts = ( # type: ignore[method-assign] + get_current_state_event_counts + ) d = handler._remote_join( create_requester(u1), diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 08214b0013..1e1ed8e642 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -401,7 +401,10 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): now = self.clock.time_msec() self.get_success( self.hs.get_datastores().main.set_destination_retry_timings( - "zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day + "zzzerver", + now, + now, + 24 * 60 * 60 * 1000, # retry in 1 day ) ) diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py index 0dcf20f5f5..e66aae499b 100644 --- a/tests/federation/test_federation_media.py +++ b/tests/federation/test_federation_media.py @@ -40,7 +40,6 @@ from tests.test_utils import SMALL_PNG class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") @@ -150,7 +149,6 @@ class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase): class FederationThumbnailTest(unittest.FederatingHomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 1b83aea579..5db10fa74c 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -288,13 +288,15 @@ 
class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): } # We also expect an outbound request to /state - self.mock_federation_transport_client.get_room_state.return_value = StateRequestResponse( - # Mimic the other server not knowing about the state at all. - # We want to cause Synapse to throw an error (`Unable to get - # missing prev_event $fake_prev_event`) and fail to backfill - # the pulled event. - auth_events=[], - state=[], + self.mock_federation_transport_client.get_room_state.return_value = ( + StateRequestResponse( + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event. + auth_events=[], + state=[], + ) ) pulled_event = make_event_from_dict( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index cc630d606c..598d6c13cd 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -1107,7 +1107,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): ), ] ], - name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}", + name_func=lambda testcase_func, + param_num, + params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}", ) @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) def test_set_presence_from_syncing_multi_device( @@ -1343,7 +1345,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): ), ] ], - name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}", + name_func=lambda testcase_func, + param_num, + params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}", ) @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) def test_set_presence_from_non_syncing_multi_device( diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index fa55f76916..d7bbc68037 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -843,7 +843,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): ) -> List[EventBase]: return list(pdus) - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( # type: ignore[method-assign] + _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + ) prev_events = self.get_success(self.store.get_prev_events_for_room(room_id)) diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 8e8621e348..ffcbf4b3ca 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -93,9 +93,7 @@ class SrvResolverTestCase(unittest.TestCase): resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) servers: List[Server] - servers = yield defer.ensureDeferred( - resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] dns_client_mock.lookupService.assert_called_once_with(service_name) @@ -122,9 +120,7 @@ class SrvResolverTestCase(unittest.TestCase): ) servers: List[Server] - servers = yield defer.ensureDeferred( - 
resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertFalse(dns_client_mock.lookupService.called) @@ -157,9 +153,7 @@ class SrvResolverTestCase(unittest.TestCase): resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) servers: List[Server] - servers = yield defer.ensureDeferred( - resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertEqual(len(servers), 0) self.assertEqual(len(cache), 0) diff --git a/tests/http/test_client.py b/tests/http/test_client.py index f2abec190b..ac6470ebbd 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -207,7 +207,9 @@ class ReadMultipartResponseTests(TestCase): class ReadBodyWithMaxSizeTests(TestCase): - def _build_response(self, length: Union[int, str] = UNKNOWN_LENGTH) -> Tuple[ + def _build_response( + self, length: Union[int, str] = UNKNOWN_LENGTH + ) -> Tuple[ BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol, diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 6827412373..6588695e37 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -895,21 +895,23 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): ) # Fake `remoteserv:8008` responding to requests - mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed( - FakeResponse( - code=200, - body=b'{"foo": "bar"}', - headers=Headers( - { - "Content-Type": ["application/json"], - "Connection": ["close, X-Foo, X-Bar"], - # Should be removed because it's defined in the `Connection` header - "X-Foo": ["foo"], - "X-Bar": ["bar"], - # Should be removed because it's a hop-by-hop header - "Proxy-Authorization": "abcdef", - } - ), + mock_agent_on_federation_sender.request.side_effect = ( + lambda *args, **kwargs: defer.succeed( + FakeResponse( + code=200, + body=b'{"foo": "bar"}', + headers=Headers( + { + "Content-Type": ["application/json"], + "Connection": ["close, X-Foo, X-Bar"], + # Should be removed because it's defined in the `Connection` header + "X-Foo": ["foo"], + "X-Bar": ["bar"], + # Should be removed because it's a hop-by-hop header + "Proxy-Authorization": "abcdef", + } + ), + ) ) ) diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 18af2735fe..db39ecf244 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -76,7 +76,7 @@ class TestServletUtils(unittest.TestCase): # Invalid UTF-8. with self.assertRaises(SynapseError): - parse_json_value_from_request(make_request(b"\xFF\x00")) + parse_json_value_from_request(make_request(b"\xff\x00")) # Invalid JSON. 
with self.assertRaises(SynapseError): diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index e55001fb40..e50ff5fa78 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -261,7 +261,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) diff --git a/tests/module_api/test_account_data_manager.py b/tests/module_api/test_account_data_manager.py index fd87eaffd0..1a1d5609b2 100644 --- a/tests/module_api/test_account_data_manager.py +++ b/tests/module_api/test_account_data_manager.py @@ -164,6 +164,8 @@ class ModuleApiTestCase(HomeserverTestCase): # noinspection PyTypeChecker self.get_success_or_raise( self._module_api.account_data_manager.put_global( - self.user_id, "test.data", 42 # type: ignore[arg-type] + self.user_id, + "test.data", + 42, # type: ignore[arg-type] ) ) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index e0aab1c046..4fafb71897 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -44,6 +44,7 @@ from tests.unittest import HomeserverTestCase @attr.s(auto_attribs=True) class _User: "Helper wrapper for user ID and access token" + id: str token: str diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index 2a1e42bbc8..150caeeee2 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -531,9 +531,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. new_display_name = "new display name" - self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = ( - new_display_name - ) + self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = new_display_name self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( @@ -577,9 +575,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. new_avatar_url = "test/new-url" - self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = ( - new_avatar_url - ) + self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = new_avatar_url self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( @@ -692,9 +688,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. 
new_avatar_url = "test/new-url" - self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = ( - new_avatar_url - ) + self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = new_avatar_url self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index 436bd4466c..5b819103c2 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -38,7 +38,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py index 65a6adf4af..6cc883a4be 100644 --- a/tests/rest/client/sliding_sync/test_extension_account_data.py +++ b/tests/rest/client/sliding_sync/test_extension_account_data.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): """Tests for the account_data sliding sync extension""" diff --git a/tests/rest/client/sliding_sync/test_extension_e2ee.py b/tests/rest/client/sliding_sync/test_extension_e2ee.py index 2ff6687796..7ce6592d8f 100644 --- a/tests/rest/client/sliding_sync/test_extension_e2ee.py +++ b/tests/rest/client/sliding_sync/test_extension_e2ee.py @@ -39,7 +39,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): """Tests for the e2ee sliding sync extension""" diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py index 90b035dd75..6e7700b533 100644 --- a/tests/rest/client/sliding_sync/test_extension_receipts.py +++ b/tests/rest/client/sliding_sync/test_extension_receipts.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): """Tests for the receipts sliding sync extension""" diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py index 5ba2443089..790abb739d 100644 --- a/tests/rest/client/sliding_sync/test_extension_to_device.py +++ 
b/tests/rest/client/sliding_sync/test_extension_to_device.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" diff --git a/tests/rest/client/sliding_sync/test_extension_typing.py b/tests/rest/client/sliding_sync/test_extension_typing.py index 0a0f5aff1a..f87c3c8b17 100644 --- a/tests/rest/client/sliding_sync/test_extension_typing.py +++ b/tests/rest/client/sliding_sync/test_extension_typing.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncTypingExtensionTestCase(SlidingSyncBase): """Tests for the typing notification sliding sync extension""" diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py index 32478467aa..30230e5c4b 100644 --- a/tests/rest/client/sliding_sync/test_extensions.py +++ b/tests/rest/client/sliding_sync/test_extensions.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncExtensionsTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_room_subscriptions.py b/tests/rest/client/sliding_sync/test_room_subscriptions.py index e81d251839..285fdaaf78 100644 --- a/tests/rest/client/sliding_sync/test_room_subscriptions.py +++ b/tests/rest/client/sliding_sync/test_room_subscriptions.py @@ -39,7 +39,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_rooms_invites.py b/tests/rest/client/sliding_sync/test_rooms_invites.py index f6f45c2500..882762ca29 100644 --- a/tests/rest/client/sliding_sync/test_rooms_invites.py +++ b/tests/rest/client/sliding_sync/test_rooms_invites.py @@ -39,7 +39,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 71542923da..4ed49040c1 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -40,7 +40,9 @@ logger = 
logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 436ae684da..91ac6c5a0e 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index e56fb58012..0e027ff39d 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -40,7 +40,9 @@ logger = logging.getLogger(__name__) (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 1dcc15b082..930cb5ef45 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -232,7 +232,9 @@ class SlidingSyncBase(unittest.HomeserverTestCase): (True,), (False,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", ) class SlidingSyncTestCase(SlidingSyncBase): """ diff --git a/tests/rest/client/test_auth_issuer.py b/tests/rest/client/test_auth_issuer.py index 299475a35c..d6f334a7ab 100644 --- a/tests/rest/client/test_auth_issuer.py +++ b/tests/rest/client/test_auth_issuer.py @@ -63,7 +63,9 @@ class AuthIssuerTestCase(HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(channel.json_body, {"issuer": ISSUER}) - req_mock.assert_called_with("https://account.example.com/.well-known/openid-configuration") + req_mock.assert_called_with( + "https://account.example.com/.well-known/openid-configuration" + ) req_mock.reset_mock() # Second call it should use the cached value diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index 06f1c1b234..039144fdbe 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -19,7 +19,7 @@ # # -""" Tests REST events for /events paths.""" +"""Tests REST events for /events paths.""" from unittest.mock import Mock diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 30b6d31d0a..42014e257e 100644 --- a/tests/rest/client/test_media.py +++ 
b/tests/rest/client/test_media.py @@ -1957,7 +1957,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.federation_get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]], ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]: data, response = r output_stream.write(data) @@ -1991,7 +1991,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index f98f3f77aa..a92713d220 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -20,6 +20,7 @@ # """Tests REST events for /profile paths.""" + import urllib.parse from http import HTTPStatus from typing import Any, Dict, Optional diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 694f143eff..c091f403cc 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -1049,9 +1049,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # Check that the HTML we're getting is the one we expect when using an # invalid/unknown token. - expected_html = ( - self.hs.config.account_validity.account_validity_invalid_token_template.render() - ) + expected_html = self.hs.config.account_validity.account_validity_invalid_token_template.render() self.assertEqual( channel.result["body"], expected_html.encode("utf8"), channel.result ) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 9614cdd66a..a1c284726a 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -330,22 +330,24 @@ class RestHelper: data, ) - assert ( - channel.code == expect_code - ), "Expected: %d, got: %d, PUT %s -> resp: %r" % ( - expect_code, - channel.code, - path, - channel.result["body"], + assert channel.code == expect_code, ( + "Expected: %d, got: %d, PUT %s -> resp: %r" + % ( + expect_code, + channel.code, + path, + channel.result["body"], + ) ) if expect_errcode: - assert ( - str(channel.json_body["errcode"]) == expect_errcode - ), "Expected: %r, got: %r, resp: %r" % ( - expect_errcode, - channel.json_body["errcode"], - channel.result["body"], + assert str(channel.json_body["errcode"]) == expect_errcode, ( + "Expected: %r, got: %r, resp: %r" + % ( + expect_errcode, + channel.json_body["errcode"], + channel.result["body"], + ) ) if expect_additional_fields is not None: @@ -354,13 +356,14 @@ class RestHelper: expect_key, channel.json_body, ) - assert ( - channel.json_body[expect_key] == expect_value - ), "Expected: %s at %s, got: %s, resp: %s" % ( - expect_value, - expect_key, - channel.json_body[expect_key], - channel.json_body, + assert channel.json_body[expect_key] == expect_value, ( + "Expected: %s at %s, got: %s, resp: %s" + % ( + expect_value, + expect_key, + channel.json_body[expect_key], + channel.json_body, + ) ) self.auth_user_id = temp_id diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index ac992766e8..c73717f014 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -124,7 +124,12 @@ class WellKnownTests(unittest.HomeserverTestCase): ) def 
test_client_well_known_msc3861_oauth_delegation(self) -> None: # Patch the HTTP client to return the issuer metadata - req_mock = AsyncMock(return_value={"issuer": "https://issuer", "account_management_uri": "https://my-account.issuer"}) + req_mock = AsyncMock( + return_value={ + "issuer": "https://issuer", + "account_management_uri": "https://my-account.issuer", + } + ) self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] for _ in range(2): @@ -145,4 +150,6 @@ class WellKnownTests(unittest.HomeserverTestCase): ) # It should have been called exactly once, because it gets cached - req_mock.assert_called_once_with("https://issuer/.well-known/openid-configuration") + req_mock.assert_called_once_with( + "https://issuer/.well-known/openid-configuration" + ) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 6d1ae4c8d7..f12402f5f2 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -292,12 +292,14 @@ class EventAuthTestCase(unittest.TestCase): ] # pleb should not be able to send state - self.assertRaises( - AuthError, - event_auth.check_state_dependent_auth_rules, - _random_state_event(RoomVersions.V1, pleb), - auth_events, - ), + ( + self.assertRaises( + AuthError, + event_auth.check_state_dependent_auth_rules, + _random_state_event(RoomVersions.V1, pleb), + auth_events, + ), + ) # king should be able to send state event_auth.check_state_dependent_auth_rules( diff --git a/tests/test_federation.py b/tests/test_federation.py index 4e9adc0625..94b0fa9856 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -101,7 +101,9 @@ class MessageAcceptTests(unittest.HomeserverTestCase): ) -> List[EventBase]: return list(pdus) - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( # type: ignore[method-assign] + _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + ) # Send the join, it should return None (which is not an error) self.assertEqual( diff --git a/tests/test_types.py b/tests/test_types.py index 00adc65a5a..0c08bc8ecc 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -145,7 +145,9 @@ class MapUsernameTestCase(unittest.TestCase): (MultiWriterStreamToken,), (RoomStreamToken,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", ) class MultiWriterTokenTestCase(unittest.HomeserverTestCase): """Tests for the different types of multi writer tokens.""" diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 4ab42a02b9..4d7adf7204 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -22,6 +22,7 @@ """ Utilities for running the unit tests """ + import json import sys import warnings diff --git a/tests/unittest.py b/tests/unittest.py index 2532fa49fb..614e805abd 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -457,7 +457,9 @@ class HomeserverTestCase(TestCase): # Type ignore: mypy doesn't like us assigning to methods. 
self.hs.get_auth().get_user_by_req = get_requester # type: ignore[method-assign] self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[method-assign] - self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[method-assign] + self.hs.get_auth().get_access_token_from_request = Mock( # type: ignore[method-assign] + return_value=token + ) if self.needs_threadpool: self.reactor.threadpool = ThreadPool() # type: ignore[assignment] From f729ef08c90b7cd337074b267a7b2768e44a7b6c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Sep 2024 15:09:04 +0100 Subject: [PATCH 196/332] Enable sliding sync support by default (#17648) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17648.feature | 1 + synapse/config/experimental.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17648.feature diff --git a/changelog.d/17648.feature b/changelog.d/17648.feature new file mode 100644 index 0000000000..49439179da --- /dev/null +++ b/changelog.d/17648.feature @@ -0,0 +1 @@ +Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index bae9cc8047..5d99c201a7 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -338,8 +338,10 @@ class ExperimentalConfig(Config): # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3575 (Sliding Sync API endpoints) - self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False) + # MSC3575 (Sliding Sync) alternate endpoints, c.f. MSC4186. + # + # This is enabled by default as a replacement for the sliding sync proxy. + self.msc3575_enabled: bool = experimental.get("msc3575_enabled", True) # MSC3773: Thread notifications self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False) From ac27c9e46a9c518df354f17c1ad64bea4839e324 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Sep 2024 15:14:57 +0100 Subject: [PATCH 197/332] 1.114.0 --- CHANGES.md | 9 +++++++++ changelog.d/17648.feature | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17648.feature diff --git a/CHANGES.md b/CHANGES.md index a8d3a917e2..12c08f821e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.114.0 (2024-09-02) + +### Features + +- Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. ([\#17648](https://github.com/element-hq/synapse/issues/17648)) + + + + # Synapse 1.114.0rc3 (2024-08-30) ### Bugfixes diff --git a/changelog.d/17648.feature b/changelog.d/17648.feature deleted file mode 100644 index 49439179da..0000000000 --- a/changelog.d/17648.feature +++ /dev/null @@ -1 +0,0 @@ -Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. 
diff --git a/debian/changelog b/debian/changelog index 32a2332c4f..dfb48edc49 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.114.0) stable; urgency=medium + + * New Synapse release 1.114.0. + + -- Synapse Packaging team Mon, 02 Sep 2024 15:14:53 +0100 + matrix-synapse-py3 (1.114.0~rc3) stable; urgency=medium * New Synapse release 1.114.0rc3. diff --git a/pyproject.toml b/pyproject.toml index bc88ca9c60..69a82b8e1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.114.0rc3" +version = "1.114.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 6722adf04e8daad3e387095805ecb4150254932f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Sep 2024 16:27:13 +0100 Subject: [PATCH 198/332] Update changelog --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 12c08f821e..943dd6b67f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Synapse 1.114.0 (2024-09-02) +This release enables support for +[MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) — +Simplified Sliding Sync. This allows using the upcoming releases of the Element +X mobile apps without having to run a Sliding Sync Proxy. + + ### Features - Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. ([\#17648](https://github.com/element-hq/synapse/issues/17648)) From 5eec67b6ef4b76caa2324a80e01e361bfa84a929 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Sep 2024 17:08:34 +0100 Subject: [PATCH 199/332] Fix changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 943dd6b67f..d3cec9cc15 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -44,7 +44,7 @@ X mobile apps without having to run a Sliding Sync Proxy. - Add support to `@tag_args` for standalone functions. ([\#17604](https://github.com/element-hq/synapse/issues/17604)) - Speed up incremental syncs in sliding sync by adding some more caching. ([\#17606](https://github.com/element-hq/synapse/issues/17606)) - Always return the user's own read receipts in sliding sync. ([\#17617](https://github.com/element-hq/synapse/issues/17617)) -- Replace `isort` and `black with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620)) +- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620)) - Refactor sliding sync code to move room list logic out into a separate class. 
([\#17622](https://github.com/element-hq/synapse/issues/17622)) From 1c5d2a41975b9a987d578069e3e7ec988d1b3c29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:19:42 +0100 Subject: [PATCH 200/332] Bump types-pillow from 10.2.0.20240520 to 10.2.0.20240822 (#17644) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 731adff9d4..d61d64b029 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2761,13 +2761,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.2.0.20240520" +version = "10.2.0.20240822" description = "Typing stubs for Pillow" optional = false python-versions = ">=3.8" files = [ - {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"}, - {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, + {file = "types-Pillow-10.2.0.20240822.tar.gz", hash = "sha256:559fb52a2ef991c326e4a0d20accb3bb63a7ba8d40eb493e0ecb0310ba52f0d3"}, + {file = "types_Pillow-10.2.0.20240822-py3-none-any.whl", hash = "sha256:d9dab025aba07aeb12fd50a6799d4eac52a9603488eca09d7662543983f16c5d"}, ] [[package]] From c24cce73a1daa9972b87e308fbb66f849bc34579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:37:30 +0100 Subject: [PATCH 201/332] Bump towncrier from 24.7.1 to 24.8.0 (#17645) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d61d64b029..fc93f245e9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2557,13 +2557,13 @@ files = [ [[package]] name = "towncrier" -version = "24.7.1" +version = "24.8.0" description = "Building newsfiles for your project." 
optional = false python-versions = ">=3.8" files = [ - {file = "towncrier-24.7.1-py3-none-any.whl", hash = "sha256:685e2a94335b5dc47537b4d3b449a25b18571ea85b07dcf6e8df31ba40f692dd"}, - {file = "towncrier-24.7.1.tar.gz", hash = "sha256:57a057faedabcadf1a62f6f9bad726ae566c1f31a411338ddb8316993f583b3d"}, + {file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"}, + {file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"}, ] [package.dependencies] From 4255c03599e7f53e63261c80deffec4b3ed08d8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:38:01 +0100 Subject: [PATCH 202/332] Bump types-psycopg2 from 2.9.21.20240417 to 2.9.21.20240819 (#17646) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index fc93f245e9..281448f9e1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2772,13 +2772,13 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.20240417" +version = "2.9.21.20240819" description = "Typing stubs for psycopg2" optional = false python-versions = ">=3.8" files = [ - {file = "types-psycopg2-2.9.21.20240417.tar.gz", hash = "sha256:05db256f4a459fb21a426b8e7fca0656c3539105ff0208eaf6bdaf406a387087"}, - {file = "types_psycopg2-2.9.21.20240417-py3-none-any.whl", hash = "sha256:644d6644d64ebbe37203229b00771012fb3b3bddd507a129a2e136485990e4f8"}, + {file = "types-psycopg2-2.9.21.20240819.tar.gz", hash = "sha256:4ed6b47464d6374fa64e5e3b234cea0f710e72123a4596d67ab50b7415a84666"}, + {file = "types_psycopg2-2.9.21.20240819-py3-none-any.whl", hash = "sha256:c9192311c27d7ad561eef705f1b2df1074f2cdcf445a98a6a2fcaaaad43278cf"}, ] [[package]] From fc10d388494cfda102056bbbe90540ae47e98b67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:48:43 +0100 Subject: [PATCH 203/332] Bump twisted from 24.7.0rc1 to 24.7.0 (#17647) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 281448f9e1..85950ff707 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2622,13 +2622,13 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "24.7.0rc1" +version = "24.7.0" description = "An asynchronous networking framework written in Python" optional = false python-versions = ">=3.8.0" files = [ - {file = "twisted-24.7.0rc1-py3-none-any.whl", hash = "sha256:f37d6656fe4e2871fab29d8952ae90bd6ca8b48a9e4dfa1b348f4cd62e6ba0bb"}, - {file = "twisted-24.7.0rc1.tar.gz", hash = "sha256:bbc4a2193ca34cfa32f626300746698a6d70fcd77d9c0b79a664c347e39634fc"}, + {file = "twisted-24.7.0-py3-none-any.whl", hash = "sha256:734832ef98108136e222b5230075b1079dad8a3fc5637319615619a7725b0c81"}, + {file = "twisted-24.7.0.tar.gz", hash = "sha256:5a60147f044187a127ec7da96d170d49bcce50c6fd36f594e60f4587eff4d394"}, ] [package.dependencies] From dce38f3faf169b5d93a1e9b8e00915b7ddc4fb7e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Sep 2024 10:52:46 +0100 Subject: [PATCH 204/332] Fix sliding sync on workers (#17649) Broke in #17630 --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17649.misc | 1 + 
synapse/storage/databases/main/events_bg_updates.py | 11 -----------
 synapse/storage/databases/main/roommember.py | 12 ++++++++++++
 3 files changed, 13 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/17649.misc

diff --git a/changelog.d/17649.misc b/changelog.d/17649.misc
new file mode 100644
index 0000000000..ed1bf6bd55
--- /dev/null
+++ b/changelog.d/17649.misc
@@ -0,0 +1 @@
+Use new database tables for sliding sync.
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 4209100a5c..307440a614 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -2342,17 +2342,6 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS

         return len(memberships_to_update_rows)

-    async def have_finished_sliding_sync_background_jobs(self) -> bool:
-        """Return if its safe to use the sliding sync membership tables."""
-
-        return await self.db_pool.updates.have_completed_background_updates(
-            (
-                _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
-                _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
-                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
-            )
-        )
-

 def _resolve_stale_data_in_sliding_sync_tables(
     txn: LoggingTransaction,
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 3d834b4bf1..8df760e8a6 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -51,6 +51,7 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
+from synapse.storage.databases.main.events_bg_updates import _BackgroundUpdates
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import Sqlite3Engine
 from synapse.storage.roommember import (
@@ -1424,6 +1425,17 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             get_sliding_sync_rooms_for_user_txn,
         )

+    async def have_finished_sliding_sync_background_jobs(self) -> bool:
+        """Return if it's safe to use the sliding sync membership tables."""
+
+        return await self.db_pool.updates.have_completed_background_updates(
+            (
+                _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+            )
+        )
+

 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(

From b054690c8cfcdea53a4b9fa203775e368f18ba77 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 5 Sep 2024 04:05:01 -0500
Subject: [PATCH 205/332] Sliding Sync: Prevent duplicate tags being added to
 traces (#17655)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Prevent duplicate tags being added to traces.

Noticed because we see these warnings in Jaeger:

[screenshot: Jaeger warnings about duplicate tags on a span]

---
 changelog.d/17655.misc | 1 +
 synapse/handlers/sliding_sync/__init__.py | 33 ++++++++++++-----------
 2 files changed, 18 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/17655.misc

diff --git a/changelog.d/17655.misc b/changelog.d/17655.misc
new file mode 100644
index 0000000000..ce997d3b41
--- /dev/null
+++ b/changelog.d/17655.misc
@@ -0,0 +1 @@
+Prevent duplicate tags being added to Sliding Sync traces.
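The handler diff that follows hoists the `num_wild_state_keys` / `lazy_load_room_members` / `num_others` counters and their `set_tag` calls out of the per-state-type loop, so each tag is written once per span. A hedged sketch of the pattern with a toy span object (the `Span` class and `tag_required_state` helper below are stand-ins, not Jaeger's or Synapse's tracing API):

```python
class Span:
    """Toy span that flags the duplicate-tag condition Jaeger warned about."""

    def __init__(self) -> None:
        self.tags: dict = {}

    def set_tag(self, key: str, value: object) -> None:
        if key in self.tags:
            print(f"warning: tag {key!r} set more than once on this span")
        self.tags[key] = value


def tag_required_state(span: Span, required_state_map: dict) -> None:
    # Accumulate across *all* state types first...
    num_others = 0
    for state_keys in required_state_map.values():
        num_others += len(state_keys)
    # ...then tag exactly once, outside the loop. Tagging inside the loop
    # would write the same key repeatedly and trip the duplicate-tag warning.
    span.set_tag("required_state_other_count", num_others)


span = Span()
tag_required_state(span, {"m.room.member": {"@user:example.org"}, "m.room.name": {""}})
assert span.tags["required_state_other_count"] == 2  # and no warning was printed
```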
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index f79796a336..ac6dc79fdf 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -449,6 +449,7 @@ class SlidingSyncHandler: return state_map + @trace async def get_room_sync_data( self, sync_config: SlidingSyncConfig, @@ -839,13 +840,13 @@ class SlidingSyncHandler: required_state_filter = StateFilter.all() else: required_state_types: List[Tuple[str, Optional[str]]] = [] + num_wild_state_keys = 0 + lazy_load_room_members = False + num_others = 0 for ( state_type, state_key_set, ) in room_sync_config.required_state_map.items(): - num_wild_state_keys = 0 - lazy_load_room_members = False - num_others = 0 for state_key in state_key_set: if state_key == StateValues.WILDCARD: num_wild_state_keys += 1 @@ -877,19 +878,19 @@ class SlidingSyncHandler: num_others += 1 required_state_types.append((state_type, state_key)) - set_tag( - SynapseTags.FUNC_ARG_PREFIX - + "required_state_wildcard_state_key_count", - num_wild_state_keys, - ) - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy", - lazy_load_room_members, - ) - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count", - num_others, - ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + + "required_state_wildcard_state_key_count", + num_wild_state_keys, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy", + lazy_load_room_members, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count", + num_others, + ) required_state_filter = StateFilter.from_types(required_state_types) From b09bcf16d97799351162aa8ea698c33efc2238d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Sep 2024 14:15:04 +0100 Subject: [PATCH 206/332] Fix background update to handle invalid events (#17641) Follow-up to #17634, https://github.com/element-hq/synapse/pull/17631 and https://github.com/element-hq/synapse/pull/17632 to fix-up https://github.com/element-hq/synapse/pull/17512 --- changelog.d/17641.misc | 1 + synapse/storage/databases/main/events_bg_updates.py | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17641.misc diff --git a/changelog.d/17641.misc b/changelog.d/17641.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17641.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. 
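The fix below simply widens two `except` clauses, but the underlying pattern is worth spelling out: a per-room background update should skip rooms whose events cannot be read rather than abort entirely. A self-contained sketch, with stand-in exception classes and a stubbed `fetch_state` (none of these are Synapse's real APIs):

```python
import logging

logger = logging.getLogger(__name__)


class DatabaseCorruptionError(Exception):
    """Stand-in for a storage-level corruption error."""


class InvalidEventError(Exception):
    """Stand-in for the error raised when a stored event fails validation."""


def fetch_state(room_id: str) -> dict:
    # Simulate one poisoned room; a real implementation would hit the DB.
    raise InvalidEventError(f"unparsable event in {room_id}")


def backfill_rooms(room_ids: list) -> list:
    handled = []
    for room_id in room_ids:
        try:
            fetch_state(room_id)
        except (DatabaseCorruptionError, InvalidEventError) as e:
            # Treat both failure modes identically: warn and skip, so a
            # single bad room cannot wedge the whole background update.
            logger.warning("Skipping room %s due to bad events: %s", room_id, e)
            continue
        handled.append(room_id)
    return handled


assert backfill_rooms(["!poisoned:example.org"]) == []
```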
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 307440a614..e20fc4471e 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -41,7 +41,10 @@ from synapse.storage.databases.main.events import ( SlidingSyncMembershipSnapshotSharedInsertValues, SlidingSyncStateInsertValues, ) -from synapse.storage.databases.main.events_worker import DatabaseCorruptionError +from synapse.storage.databases.main.events_worker import ( + DatabaseCorruptionError, + InvalidEventError, +) from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.types import Cursor @@ -2089,7 +2092,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS fetched_events = await self.get_events( current_state_ids_map.values() ) - except DatabaseCorruptionError as e: + except (DatabaseCorruptionError, InvalidEventError) as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, @@ -2197,7 +2200,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS try: fetched_events = await self.get_events(state_ids_map.values()) - except DatabaseCorruptionError as e: + except (DatabaseCorruptionError, InvalidEventError) as e: logger.warning( "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", room_id, From f73c844403de00630fd773075cefe6f502b54e69 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Sep 2024 15:42:49 +0100 Subject: [PATCH 207/332] Fix bump stamp for non-joined rooms We should only look for bump stamps in joined rooms, otherwise we should just use the membership stream ordering. --- synapse/handlers/sliding_sync/__init__.py | 38 ++++++++-------- .../client/sliding_sync/test_rooms_meta.py | 45 +++++++++++++++++++ 2 files changed, 65 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index ac6dc79fdf..f76b846fcc 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -967,27 +967,29 @@ class SlidingSyncHandler: ) ) - # Figure out the last bump event in the room - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) - ) - # By default, just choose the membership event position bump_stamp = room_membership_for_user_at_to_token.event_pos.stream - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + # Figure out the last bump event in the room if we're in the room. 
+ if room_membership_for_user_at_to_token.membership == Membership.JOIN: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + + # But if we found a bump event, use that instead + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 4ed49040c1..fe44aa7abd 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -758,3 +758,48 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) + + def test_rooms_bump_stamp_invites(self) -> None: + """ + Test that `bump_stamp` is present and points to the membership event, + and not later events, for non-joined rooms + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + ) + + # Invite user1 to the room + invite_response = self.helper.invite(room_id, user2_id, user1_id, tok=user2_tok) + + # More messages happen after the invite + self.helper.send(room_id, "message in room1", tok=user2_tok) + + # We expect the bump_stamp to match the invite. + invite_pos = self.get_success( + self.store.get_position_for_event(invite_response["event_id"]) + ) + + # Doing an SS request should return a `bump_stamp` of the invite event, + # rather than the message that was sent after. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertEqual( + response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream + ) From 6b770d8bfc8bbc27aa7681436aa47fb41cb7911e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Sep 2024 15:43:37 +0100 Subject: [PATCH 208/332] Revert "Fix bump stamp for non-joined rooms" This reverts commit f73c844403de00630fd773075cefe6f502b54e69. 
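For reference, the logic being reverted below reduces to a small decision rule: default to the stream position of the user's own membership event, and only let a later bump event win when the user is joined and the event was not backfilled. A hedged sketch, where `choose_bump_stamp` and its parameters are illustrative names rather than the handler's real signature:

```python
from typing import Optional

JOIN = "join"


def choose_bump_stamp(
    membership: str, membership_pos: int, last_bump_pos: Optional[int]
) -> int:
    # Default: the stream position of the user's own membership event.
    bump_stamp = membership_pos
    # Only joined users should have later room activity bump the room, and
    # backfilled events carry negative stream orderings that must not win.
    if membership == JOIN and last_bump_pos is not None and last_bump_pos > 0:
        bump_stamp = last_bump_pos
    return bump_stamp


assert choose_bump_stamp("invite", 105, 250) == 105  # invite stays pinned to the invite
assert choose_bump_stamp(JOIN, 10, 250) == 250  # joined: latest bump event wins
assert choose_bump_stamp(JOIN, 10, -7) == 10  # backfilled (negative) bump ignored
```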
--- synapse/handlers/sliding_sync/__init__.py | 38 ++++++++-------- .../client/sliding_sync/test_rooms_meta.py | 45 ------------------- 2 files changed, 18 insertions(+), 65 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index f76b846fcc..ac6dc79fdf 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -967,29 +967,27 @@ class SlidingSyncHandler: ) ) + # Figure out the last bump event in the room + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + # By default, just choose the membership event position bump_stamp = room_membership_for_user_at_to_token.event_pos.stream + # But if we found a bump event, use that instead + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result - # Figure out the last bump event in the room if we're in the room. - if room_membership_for_user_at_to_token.membership == Membership.JOIN: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) - ) - - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index fe44aa7abd..4ed49040c1 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -758,48 +758,3 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) - - def test_rooms_bump_stamp_invites(self) -> None: - """ - Test that `bump_stamp` is present and points to the membership event, - and not later events, for non-joined rooms - """ - - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as( - user2_id, - tok=user2_tok, - ) - - # Invite user1 to the room - invite_response = self.helper.invite(room_id, user2_id, user1_id, tok=user2_tok) - - # More messages happen after the invite - self.helper.send(room_id, "message in room1", tok=user2_tok) - - # We expect the bump_stamp to match the invite. 
- invite_pos = self.get_success( - self.store.get_position_for_event(invite_response["event_id"]) - ) - - # Doing an SS request should return a `bump_stamp` of the invite event, - # rather than the message that was sent after. - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 5, - } - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - self.assertEqual( - response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream - ) From de3363ef58a6088641b9fce51d75b1377f2841f2 Mon Sep 17 00:00:00 2001 From: Johannes Marbach Date: Thu, 5 Sep 2024 18:07:39 +0200 Subject: [PATCH 209/332] Stabilise MSC4156: `server_name` -> `via` (#17650) --- changelog.d/17650.removal | 1 + synapse/config/experimental.py | 3 --- synapse/rest/client/knock.py | 13 ++++--------- synapse/rest/client/room.py | 11 ++++------- 4 files changed, 9 insertions(+), 19 deletions(-) create mode 100644 changelog.d/17650.removal diff --git a/changelog.d/17650.removal b/changelog.d/17650.removal new file mode 100644 index 0000000000..1238815c08 --- /dev/null +++ b/changelog.d/17650.removal @@ -0,0 +1 @@ +Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. \ No newline at end of file diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 5d99c201a7..99185db93d 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -447,6 +447,3 @@ class ExperimentalConfig(Config): # MSC4151: Report room API (Client-Server API) self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) - - # MSC4156: Migrate server_name to via - self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False) diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index e31687fc13..d7a17e1b35 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -53,7 +53,6 @@ class KnockRoomAliasServlet(RestServlet): super().__init__() self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() - self._support_via = hs.config.experimental.msc4156_enabled async def on_POST( self, @@ -72,15 +71,11 @@ class KnockRoomAliasServlet(RestServlet): # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args( - args, "server_name", required=False - ) - if self._support_via: + # Prefer via over server_name (deprecated with MSC4156) + remote_room_hosts = parse_strings_from_args(args, "via", required=False) + if remote_room_hosts is None: remote_room_hosts = parse_strings_from_args( - args, - "org.matrix.msc4156.via", - default=remote_room_hosts, - required=False, + args, "server_name", required=False ) elif RoomAlias.is_valid(room_identifier): handler = self.room_member_handler diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 83f84e4998..23c909ab14 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -419,7 +419,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): super().__init__(hs) super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up self.auth = hs.get_auth() - self._support_via = hs.config.experimental.msc4156_enabled def register(self, http_server: HttpServer) -> None: # /join/$room_identifier[/$txn_id] @@ -437,13 +436,11 @@ class 
JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args(args, "server_name", required=False) - if self._support_via: + # Prefer via over server_name (deprecated with MSC4156) + remote_room_hosts = parse_strings_from_args(args, "via", required=False) + if remote_room_hosts is None: remote_room_hosts = parse_strings_from_args( - args, - "org.matrix.msc4156.via", - default=remote_room_hosts, - required=False, + args, "server_name", required=False ) room_id, remote_room_hosts = await self.resolve_room_id( room_identifier, From d5accec2e5c857bf6ba71259ea052f5fe173b2eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Sep 2024 11:12:29 +0100 Subject: [PATCH 210/332] Speed up sliding sync by avoiding copies (#17670) We ended up spending ~10% CPU creating a new dictionary and `_RoomMembershipForUser`, so let's avoid creating new dicts and copying by returning `newly_joined`, `newly_left` and `is_dm` as sets directly. --------- Co-authored-by: Eric Eastwood --- changelog.d/17670.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 22 +- synapse/handlers/sliding_sync/room_lists.py | 175 ++++------ synapse/storage/roommember.py | 14 + tests/handlers/test_sliding_sync.py | 346 +++++++++++--------- 5 files changed, 296 insertions(+), 262 deletions(-) create mode 100644 changelog.d/17670.misc diff --git a/changelog.d/17670.misc b/changelog.d/17670.misc new file mode 100644 index 0000000000..3550679247 --- /dev/null +++ b/changelog.d/17670.misc @@ -0,0 +1 @@ +Small performance improvement in speeding up sliding sync. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index ac6dc79fdf..0f06ffaa11 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -25,8 +25,8 @@ from synapse.events.utils import strip_event from synapse.handlers.relations import BundledAggregations from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler from synapse.handlers.sliding_sync.room_lists import ( + RoomsForUserType, SlidingSyncRoomLists, - _RoomMembershipForUser, ) from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore from synapse.logging.opentracing import ( @@ -39,7 +39,9 @@ from synapse.logging.opentracing import ( ) from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import PaginateFunction -from synapse.storage.roommember import MemberSummary +from synapse.storage.roommember import ( + MemberSummary, +) from synapse.types import ( JsonDict, PersistedEventPosition, @@ -255,6 +257,8 @@ class SlidingSyncHandler: ], from_token=from_token, to_token=to_token, + newly_joined=room_id in interested_rooms.newly_joined_rooms, + is_dm=room_id in interested_rooms.dm_room_ids, ) # Filter out empty room results during incremental sync @@ -352,7 +356,7 @@ class SlidingSyncHandler: async def get_current_state_ids_at( self, room_id: str, - room_membership_for_user_at_to_token: _RoomMembershipForUser, + room_membership_for_user_at_to_token: RoomsForUserType, state_filter: StateFilter, to_token: StreamToken, ) -> StateMap[str]: @@ -417,7 +421,7 @@ class SlidingSyncHandler: async def get_current_state_at( self, room_id: str, - room_membership_for_user_at_to_token: _RoomMembershipForUser, + 
room_membership_for_user_at_to_token: RoomsForUserType, state_filter: StateFilter, to_token: StreamToken, ) -> StateMap[EventBase]: @@ -457,9 +461,11 @@ class SlidingSyncHandler: new_connection_state: "MutablePerConnectionState", room_id: str, room_sync_config: RoomSyncConfig, - room_membership_for_user_at_to_token: _RoomMembershipForUser, + room_membership_for_user_at_to_token: RoomsForUserType, from_token: Optional[SlidingSyncStreamToken], to_token: StreamToken, + newly_joined: bool, + is_dm: bool, ) -> SlidingSyncResult.RoomResult: """ Fetch room data for the sync response. @@ -475,6 +481,8 @@ class SlidingSyncHandler: in the room at the time of `to_token`. from_token: The point in the stream to sync from. to_token: The point in the stream to sync up to. + newly_joined: If the user has newly joined the room + is_dm: Whether the room is a DM room """ user = sync_config.user @@ -519,7 +527,7 @@ class SlidingSyncHandler: from_bound = None initial = True ignore_timeline_bound = False - if from_token and not room_membership_for_user_at_to_token.newly_joined: + if from_token and not newly_joined: room_status = previous_connection_state.rooms.have_sent_room(room_id) if room_status.status == HaveSentRoomFlag.LIVE: from_bound = from_token.stream_token.room_key @@ -1044,7 +1052,7 @@ class SlidingSyncHandler: name=room_name, avatar=room_avatar, heroes=heroes, - is_dm=room_membership_for_user_at_to_token.is_dm, + is_dm=is_dm, initial=initial, required_state=list(required_room_state.values()), timeline_events=timeline_events, diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 1423d6ca53..a77b7ef2c3 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -19,7 +19,6 @@ from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, - Any, Dict, List, Literal, @@ -48,7 +47,11 @@ from synapse.storage.databases.main.state import ( Sentinel as StateSentinel, ) from synapse.storage.databases.main.stream import CurrentStateDeltaMembership -from synapse.storage.roommember import RoomsForUser, RoomsForUserSlidingSync +from synapse.storage.roommember import ( + RoomsForUser, + RoomsForUserSlidingSync, + RoomsForUserStateReset, +) from synapse.types import ( MutableStateMap, PersistedEventPosition, @@ -76,6 +79,11 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# Helper definition for the types that we might return. We do this to avoid +# copying data between types (which can be expensive for many rooms). +RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync] + + @attr.s(auto_attribs=True, slots=True, frozen=True) class SlidingSyncInterestedRooms: """The set of rooms and metadata a client is interested in based on their @@ -91,13 +99,22 @@ class SlidingSyncInterestedRooms: includes the rooms that *may* have relevant updates. Rooms not in this map will definitely not have room updates (though extensions may have updates in these rooms). + newly_joined_rooms: The set of rooms that were joined in the token range + and the user is still joined to at the end of this range. + newly_left_rooms: The set of rooms that we left in the token range + and are still "leave" at the end of this range. 
+ dm_room_ids: The set of rooms the user consider as direct-message (DM) rooms """ lists: Mapping[str, SlidingSyncResult.SlidingWindowList] relevant_room_map: Mapping[str, RoomSyncConfig] relevant_rooms_to_send_map: Mapping[str, RoomSyncConfig] all_rooms: Set[str] - room_membership_for_user_map: Mapping[str, "_RoomMembershipForUser"] + room_membership_for_user_map: Mapping[str, RoomsForUserType] + + newly_joined_rooms: AbstractSet[str] + newly_left_rooms: AbstractSet[str] + dm_room_ids: AbstractSet[str] class Sentinel(enum.Enum): @@ -106,47 +123,10 @@ class Sentinel(enum.Enum): UNSET_SENTINEL = object() -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _RoomMembershipForUser: - """ - Attributes: - room_id: The room ID of the membership event - event_id: The event ID of the membership event - event_pos: The stream position of the membership event - membership: The membership state of the user in the room - sender: The person who sent the membership event - newly_joined: Whether the user newly joined the room during the given token - range and is still joined to the room at the end of this range. - newly_left: Whether the user newly left (or kicked) the room during the given - token range and is still "leave" at the end of this range. - is_dm: Whether this user considers this room as a direct-message (DM) room - """ - - room_id: str - # Optional because state resets can affect room membership without a corresponding event. - event_id: Optional[str] - # Even during a state reset which removes the user from the room, we expect this to - # be set because `current_state_delta_stream` will note the position that the reset - # happened. - event_pos: PersistedEventPosition - # Even during a state reset which removes the user from the room, we expect this to - # be set to `LEAVE` because we can make that assumption based on the situaton (see - # `get_current_state_delta_membership_changes_for_user(...)`) - membership: str - # Optional because state resets can affect room membership without a corresponding event. - sender: Optional[str] - newly_joined: bool - newly_left: bool - is_dm: bool - - def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": - return attr.evolve(self, **kwds) - - def filter_membership_for_sync( *, user_id: str, - room_membership_for_user: Union[_RoomMembershipForUser, RoomsForUserSlidingSync], + room_membership_for_user: RoomsForUserType, newly_left: bool, ) -> bool: """ @@ -479,22 +459,10 @@ class SlidingSyncRoomLists: relevant_room_map=relevant_room_map, relevant_rooms_to_send_map=relevant_rooms_to_send_map, all_rooms=all_rooms, - room_membership_for_user_map={ - # FIXME: Ideally we wouldn't have to create a new - # `_RoomMembershipForUser` here and instead just return - # `newly_joined_room_ids` directly, to save CPU time. 
- room_id: _RoomMembershipForUser( - room_id=room_id, - event_id=membership_info.event_id, - event_pos=membership_info.event_pos, - sender=membership_info.sender, - membership=membership_info.membership, - newly_joined=room_id in newly_joined_room_ids, - newly_left=room_id in newly_left_room_map, - is_dm=room_id in dm_room_ids, - ) - for room_id, membership_info in room_membership_for_user_map.items() - }, + room_membership_for_user_map=room_membership_for_user_map, + newly_joined_rooms=newly_joined_room_ids, + newly_left_rooms=set(newly_left_room_map), + dm_room_ids=dm_room_ids, ) async def _compute_interested_rooms_fallback( @@ -506,12 +474,16 @@ class SlidingSyncRoomLists: ) -> SlidingSyncInterestedRooms: """Fallback code when the database background updates haven't completed yet.""" - room_membership_for_user_map = ( - await self.get_room_membership_for_user_at_to_token( - sync_config.user, to_token, from_token - ) + ( + room_membership_for_user_map, + newly_joined_room_ids, + newly_left_room_ids, + ) = await self.get_room_membership_for_user_at_to_token( + sync_config.user, to_token, from_token ) + dm_room_ids = await self._get_dm_rooms_for_user(sync_config.user.to_string()) + # Assemble sliding window lists lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} # Keep track of the rooms that we can display and need to fetch more info about @@ -525,6 +497,7 @@ class SlidingSyncRoomLists: sync_room_map = await self.filter_rooms_relevant_for_sync( user=sync_config.user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left_room_ids, ) for list_key, list_config in sync_config.lists.items(): @@ -536,6 +509,7 @@ class SlidingSyncRoomLists: sync_room_map, list_config.filters, to_token, + dm_room_ids, ) # Find which rooms are partially stated and may need to be filtered out @@ -679,6 +653,9 @@ class SlidingSyncRoomLists: relevant_rooms_to_send_map=relevant_rooms_to_send_map, all_rooms=all_rooms, room_membership_for_user_map=room_membership_for_user_map, + newly_joined_rooms=newly_joined_room_ids, + newly_left_rooms=newly_left_room_ids, + dm_room_ids=dm_room_ids, ) async def _filter_relevant_room_to_send( @@ -755,7 +732,7 @@ class SlidingSyncRoomLists: async def _get_rewind_changes_to_current_membership_to_token( self, user: UserID, - rooms_for_user: Mapping[str, Union[RoomsForUser, RoomsForUserSlidingSync]], + rooms_for_user: Mapping[str, RoomsForUserType], to_token: StreamToken, ) -> Mapping[str, Optional[RoomsForUser]]: """ @@ -907,7 +884,7 @@ class SlidingSyncRoomLists: user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Fetch room IDs that the user has had membership in (the full room list including long-lost left rooms that will be filtered, sorted, and sliced). @@ -926,8 +903,11 @@ class SlidingSyncRoomLists: from_token: The point in the stream to sync from. Returns: - A dictionary of room IDs that the user has had membership in along with - membership information in that room at the time of `to_token`. + A 3-tuple of: + - A dictionary of room IDs that the user has had membership in along with + membership information in that room at the time of `to_token`. 
+ - Set of newly joined rooms + - Set of newly left rooms """ user_id = user.to_string() @@ -944,12 +924,14 @@ class SlidingSyncRoomLists: # If the user has never joined any rooms before, we can just return an empty list if not room_for_user_list: - return {} + return {}, set(), set() # Since we fetched the users room list at some point in time after the # tokens, we need to revert/rewind some membership changes to match the point in # time of the `to_token`. - rooms_for_user = {room.room_id: room for room in room_for_user_list} + rooms_for_user: Dict[str, RoomsForUserType] = { + room.room_id: room for room in room_for_user_list + } changes = await self._get_rewind_changes_to_current_membership_to_token( user, rooms_for_user, to_token ) @@ -966,42 +948,23 @@ class SlidingSyncRoomLists: user_id, to_token=to_token, from_token=from_token ) - dm_room_ids = await self._get_dm_rooms_for_user(user_id) - - # Our working list of rooms that can show up in the sync response - sync_room_id_set = { - room_for_user.room_id: _RoomMembershipForUser( - room_id=room_for_user.room_id, - event_id=room_for_user.event_id, - event_pos=room_for_user.event_pos, - membership=room_for_user.membership, - sender=room_for_user.sender, - newly_joined=room_id in newly_joined_room_ids, - newly_left=room_id in newly_left_room_ids, - is_dm=room_id in dm_room_ids, - ) - for room_id, room_for_user in rooms_for_user.items() - } - # Ensure we have entries for rooms that the user has been "state reset" # out of. These are rooms appear in the `newly_left_rooms` map but # aren't in the `rooms_for_user` map. for room_id, left_event_pos in newly_left_room_ids.items(): - if room_id in sync_room_id_set: + if room_id in rooms_for_user: continue - sync_room_id_set[room_id] = _RoomMembershipForUser( + rooms_for_user[room_id] = RoomsForUserStateReset( room_id=room_id, event_id=None, event_pos=left_event_pos, membership=Membership.LEAVE, sender=None, - newly_joined=False, - newly_left=True, - is_dm=room_id in dm_room_ids, + room_version_id=await self.store.get_room_version_id(room_id), ) - return sync_room_id_set + return rooms_for_user, newly_joined_room_ids, set(newly_left_room_ids) @trace async def _get_newly_joined_and_left_rooms( @@ -1009,7 +972,7 @@ class SlidingSyncRoomLists: user_id: str, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[StrCollection, Mapping[str, PersistedEventPosition]]: + ) -> Tuple[AbstractSet[str], Mapping[str, PersistedEventPosition]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. @@ -1162,8 +1125,9 @@ class SlidingSyncRoomLists: async def filter_rooms_relevant_for_sync( self, user: UserID, - room_membership_for_user_map: Dict[str, _RoomMembershipForUser], - ) -> Dict[str, _RoomMembershipForUser]: + room_membership_for_user_map: Dict[str, RoomsForUserType], + newly_left_room_ids: AbstractSet[str], + ) -> Dict[str, RoomsForUserType]: """ Filter room IDs that should/can be listed for this user in the sync response (the full room list that will be further filtered, sorted, and sliced). 
@@ -1184,6 +1148,7 @@ class SlidingSyncRoomLists: Args: user: User that is syncing room_membership_for_user_map: Room membership for the user + newly_left_room_ids: The set of room IDs we have newly left Returns: A dictionary of room IDs that should be listed in the sync response along @@ -1198,7 +1163,7 @@ class SlidingSyncRoomLists: if filter_membership_for_sync( user_id=user_id, room_membership_for_user=room_membership_for_user, - newly_left=room_membership_for_user.newly_left, + newly_left=room_id in newly_left_room_ids, ) } @@ -1207,9 +1172,9 @@ class SlidingSyncRoomLists: async def check_room_subscription_allowed_for_user( self, room_id: str, - room_membership_for_user_map: Dict[str, _RoomMembershipForUser], + room_membership_for_user_map: Dict[str, RoomsForUserType], to_token: StreamToken, - ) -> Optional[_RoomMembershipForUser]: + ) -> Optional[RoomsForUserType]: """ Check whether the user is allowed to see the room based on whether they have ever had membership in the room or if the room is `world_readable`. @@ -1274,7 +1239,7 @@ class SlidingSyncRoomLists: async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( self, room_ids: StrCollection, - sync_room_map: Dict[str, _RoomMembershipForUser], + sync_room_map: Dict[str, RoomsForUserType], ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: """ Fetch stripped state for a list of room IDs. Stripped state is only @@ -1371,7 +1336,7 @@ class SlidingSyncRoomLists: "room_encryption", ], room_ids: Set[str], - sync_room_map: Dict[str, _RoomMembershipForUser], + sync_room_map: Dict[str, RoomsForUserType], to_token: StreamToken, room_id_to_stripped_state_map: Dict[ str, Optional[StateMap[StrippedStateEvent]] @@ -1535,10 +1500,11 @@ class SlidingSyncRoomLists: async def filter_rooms( self, user: UserID, - sync_room_map: Dict[str, _RoomMembershipForUser], + sync_room_map: Dict[str, RoomsForUserType], filters: SlidingSyncConfig.SlidingSyncList.Filters, to_token: StreamToken, - ) -> Dict[str, _RoomMembershipForUser]: + dm_room_ids: AbstractSet[str], + ) -> Dict[str, RoomsForUserType]: """ Filter rooms based on the sync request. @@ -1548,6 +1514,7 @@ class SlidingSyncRoomLists: information in the room at the time of `to_token`. filters: Filters to apply to_token: We filter based on the state of the room at this token + dm_room_ids: Set of room IDs that are DMs for the user Returns: A filtered dictionary of room IDs along with membership information in the @@ -1567,14 +1534,14 @@ class SlidingSyncRoomLists: filtered_room_id_set = { room_id for room_id in filtered_room_id_set - if sync_room_map[room_id].is_dm + if room_id in dm_room_ids } else: # Only non-DM rooms please filtered_room_id_set = { room_id for room_id in filtered_room_id_set - if not sync_room_map[room_id].is_dm + if room_id not in dm_room_ids } if filters.spaces is not None: @@ -1862,9 +1829,9 @@ class SlidingSyncRoomLists: @trace async def sort_rooms( self, - sync_room_map: Dict[str, _RoomMembershipForUser], + sync_room_map: Dict[str, RoomsForUserType], to_token: StreamToken, - ) -> List[_RoomMembershipForUser]: + ) -> List[RoomsForUserType]: """ Sort by `stream_ordering` of the last event that the user should see in the room. `stream_ordering` is unique so we get a stable sort. 
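Stepping back from the diff above, the ~10% CPU saving comes from the shape of the return value: instead of allocating a fresh frozen record per room just to carry a few booleans, the original records are returned untouched alongside plain sets. A simplified sketch, with cut-down stand-ins for the real attrs classes:

```python
import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomsForUser:
    """Cut-down stand-in for the storage-layer record."""

    room_id: str
    membership: str


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _RoomMembershipForUser:
    """The per-room wrapper the old code allocated for every room."""

    room_id: str
    membership: str
    newly_joined: bool
    newly_left: bool


def old_shape(rooms, joined, left):
    # One fresh frozen object per room, built only to carry two booleans.
    return {
        r.room_id: _RoomMembershipForUser(
            r.room_id, r.membership, r.room_id in joined, r.room_id in left
        )
        for r in rooms
    }


def new_shape(rooms, joined, left):
    # Zero copies: the original records flow through unchanged, and callers
    # test `room_id in newly_joined` instead of reading a copied attribute.
    return {r.room_id: r for r in rooms}, joined, left


rooms = [RoomsForUser("!a:x", "join"), RoomsForUser("!b:x", "leave")]
assert old_shape(rooms, {"!a:x"}, {"!b:x"})["!a:x"].newly_joined is True
room_map, newly_joined, newly_left = new_shape(rooms, {"!a:x"}, {"!b:x"})
assert "!a:x" in newly_joined and room_map["!a:x"] is rooms[0]
```

The test changes in the rest of this commit follow directly from that shape change: assertions move from reading `.newly_joined` on a copied object to membership checks against the returned sets.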
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 09213627ec..af71c01c17 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -52,6 +52,20 @@ class RoomsForUserSlidingSync: is_encrypted: bool +@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) +class RoomsForUserStateReset: + """A version of `RoomsForUser` that supports optional sender and event ID + fields, to handle state resets. State resets can affect room membership + without a corresponding event so that information isn't always available.""" + + room_id: str + sender: Optional[str] + membership: str + event_id: Optional[str] + event_pos: PersistedEventPosition + room_version_id: str + + @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class GetRoomsForUserWithStreamOrdering: room_id: str diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 2ef9f665f9..7511a5b00a 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,7 +18,7 @@ # # import logging -from typing import Dict, List, Optional +from typing import AbstractSet, Dict, List, Optional, Tuple from unittest.mock import patch from parameterized import parameterized @@ -37,9 +37,9 @@ from synapse.api.room_versions import RoomVersions from synapse.events import StrippedStateEvent, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import ( + RoomsForUserType, RoomSyncConfig, StateValues, - _RoomMembershipForUser, ) from synapse.rest import admin from synapse.rest.client import knock, login, room @@ -606,7 +606,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, _, _ = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=now_token, @@ -633,7 +633,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, @@ -651,8 +651,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should be considered `newly_joined` because we joined during the token # range - self.assertEqual(room_id_results[room_id].newly_joined, True) - self.assertEqual(room_id_results[room_id].newly_left, False) + self.assertTrue(room_id in newly_joined) + self.assertTrue(room_id not in newly_left) def test_get_already_joined_room(self) -> None: """ @@ -668,7 +668,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room_token, @@ -685,8 +685,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range - 
self.assertEqual(room_id_results[room_id].newly_joined, False) - self.assertEqual(room_id_results[room_id].newly_left, False) + self.assertTrue(room_id not in newly_joined) + self.assertTrue(room_id not in newly_left) def test_get_invited_banned_knocked_room(self) -> None: """ @@ -742,7 +742,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_token, @@ -766,24 +766,24 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): invite_response["event_id"], ) self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) - self.assertEqual(room_id_results[invited_room_id].newly_joined, False) - self.assertEqual(room_id_results[invited_room_id].newly_left, False) + self.assertTrue(invited_room_id not in newly_joined) + self.assertTrue(invited_room_id not in newly_left) self.assertEqual( room_id_results[ban_room_id].event_id, ban_response["event_id"], ) self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) - self.assertEqual(room_id_results[ban_room_id].newly_joined, False) - self.assertEqual(room_id_results[ban_room_id].newly_left, False) + self.assertTrue(ban_room_id not in newly_joined) + self.assertTrue(ban_room_id not in newly_left) self.assertEqual( room_id_results[knock_room_id].event_id, knock_room_membership_state_event.event_id, ) self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) - self.assertEqual(room_id_results[knock_room_id].newly_joined, False) - self.assertEqual(room_id_results[knock_room_id].newly_left, False) + self.assertTrue(knock_room_id not in newly_joined) + self.assertTrue(knock_room_id not in newly_left) def test_get_kicked_room(self) -> None: """ @@ -814,7 +814,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_kick_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, @@ -833,8 +833,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were not joined at the the time # of the `to_token`. 
- self.assertEqual(room_id_results[kick_room_id].newly_joined, False) - self.assertEqual(room_id_results[kick_room_id].newly_left, False) + self.assertTrue(kick_room_id not in newly_joined) + self.assertTrue(kick_room_id not in newly_left) def test_forgotten_rooms(self) -> None: """ @@ -907,7 +907,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.result) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room_forgets, @@ -937,7 +937,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room2_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -954,8 +954,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` or `newly_left` because that happened before # the from/to range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) self.assertEqual( room_id_results[room_id2].event_id, @@ -963,8 +963,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we are instead `newly_left` - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, True) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 in newly_left) def test_no_joins_after_to_token(self) -> None: """ @@ -987,7 +987,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id2, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1003,8 +1003,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_during_range_and_left_room_after_to_token(self) -> None: """ @@ -1027,7 +1027,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave the room after we already have our tokens leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1052,8 +1052,8 
@@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_before_range_and_left_room_after_to_token(self) -> None: """ @@ -1074,7 +1074,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave the room after we already have our tokens leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1098,8 +1098,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_kicked_before_range_and_left_after_to_token(self) -> None: """ @@ -1138,7 +1138,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok) leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_kick_token, @@ -1165,8 +1165,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were kicked - self.assertEqual(room_id_results[kick_room_id].newly_joined, False) - self.assertEqual(room_id_results[kick_room_id].newly_left, False) + self.assertTrue(kick_room_id not in newly_joined) + self.assertTrue(kick_room_id not in newly_left) def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None: """ @@ -1194,7 +1194,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1221,8 +1221,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we are actually `newly_left` during # the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 in newly_left) def 
test_newly_left_during_range_and_join_after_to_token(self) -> None: """ @@ -1249,7 +1249,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room after we already have our tokens join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1275,8 +1275,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we are actually `newly_left` during # the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 in newly_left) def test_no_from_token(self) -> None: """ @@ -1308,7 +1308,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room2 after we already have our tokens self.helper.join(room_id2, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=None, @@ -1328,8 +1328,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined`/`newly_left` because there is no # `from_token` to define a "live" range to compare against - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) # Room2 # It should be pointing to the latest membership event in the from/to range @@ -1340,8 +1340,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because there is no # `from_token` to define a "live" range to compare against - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 not in newly_left) def test_from_token_ahead_of_to_token(self) -> None: """ @@ -1390,7 +1390,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room4 after we already have our tokens self.helper.join(room_id4, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=from_token, @@ -1424,8 +1424,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1` # before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in 
newly_left) # Room2 # It should be pointing to the latest membership event in the from/to range @@ -1436,8 +1436,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because we joined and left # `room2` before either of the tokens - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 not in newly_left) def test_leave_before_range_and_join_leave_after_to_token(self) -> None: """ @@ -1463,7 +1463,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id1, user1_id, tok=user1_tok) self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1480,8 +1480,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because we joined and left # `room1` before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_leave_before_range_and_join_after_to_token(self) -> None: """ @@ -1506,7 +1506,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room after we already have our tokens self.helper.join(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1523,8 +1523,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because we joined and left # `room1` before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_leave_multiple_times_during_range_and_after_to_token( self, @@ -1556,7 +1556,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1584,10 +1584,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertTrue(room_id1 in newly_joined) # We should *NOT* be `newly_left`
because we joined during the token range and # were still joined at the end of the range - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_left) def test_join_leave_multiple_times_before_range_and_after_to_token( self, @@ -1618,7 +1618,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1646,8 +1646,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_invite_before_range_and_join_leave_after_to_token( self, @@ -1677,7 +1677,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_respsonse = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1703,8 +1703,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE) # We should *NOT* be `newly_joined` because we were only invited before the # token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_and_display_name_changes_in_token_range( self, @@ -1751,7 +1751,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1780,8 +1780,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_in_token_range( self, @@ -1816,7 +1816,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_change1_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1842,8
+1842,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_before_and_after_token_range( self, @@ -1888,7 +1888,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -1917,8 +1917,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_leave_after_token_range( self, @@ -1970,7 +1970,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave after the token self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -1999,8 +1999,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_join_after_token_range( self, @@ -2038,7 +2038,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -2074,7 +2074,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_more_changes_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=after_room1_token, @@ -2092,8 +2092,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be considered `newly_joined` because there is some non-join event in # between our join events.
- self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_newly_joined_only_joins_during_token_range( self, @@ -2139,7 +2139,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room1_token = self.event_sources.get_current_token() - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room1_token, @@ -2168,8 +2168,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we first joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_multiple_rooms_are_not_confused( self, @@ -2215,7 +2215,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave room3 self.helper.leave(room_id3, user1_id, tok=user1_tok) - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_room3_token, @@ -2244,8 +2244,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left # before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) # Room2 # It should be pointing to the latest membership event in the from/to range @@ -2256,8 +2256,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE) # We should *NOT* be `newly_joined`/`newly_left` because we were invited before # the token range - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 not in newly_left) # Room3 # It should be pointing to the latest membership event in the from/to range @@ -2268,8 +2268,8 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE) # We should be `newly_left` because we were invited and left during # the token range - self.assertEqual(room_id_results[room_id3].newly_joined, False) - self.assertEqual(room_id_results[room_id3].newly_left, True) + self.assertTrue(room_id3 not in newly_joined) + self.assertTrue(room_id3 in newly_left) def test_state_reset(self) -> None: """ @@ -2351,7 +2351,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_reset_token = self.event_sources.get_current_token() # The function under test - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( 
self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_reset_token, @@ -2370,9 +2370,9 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # State reset caused us to leave the room and there is no corresponding leave event self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertTrue(room_id1 not in newly_joined) # We should be `newly_left` because we were removed via state reset during the from/to range - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 in newly_left) class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase): @@ -2565,7 +2565,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa self.get_success(actx.__aexit__(None, None, None)) # The function under test - room_id_results = self.get_success( + room_id_results, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( UserID.from_string(user1_id), from_token=before_stuck_activity_token, @@ -2590,8 +2590,8 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa ) self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) # Room2 # It should be pointing to the latest membership event in the from/to range @@ -2606,8 +2606,8 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa # `stuck_activity_token` is generated, the stream position for worker2 wasn't # advanced to the join yet. Looking at the `instance_map`, the join technically # comes after `stuck_activity_token`. 
- self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 not in newly_left) # Room3 # It should be pointing to the latest membership event in the from/to range @@ -2617,8 +2617,8 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa ) self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id3].newly_joined, True) - self.assertEqual(room_id_results[room_id3].newly_left, False) + self.assertTrue(room_id3 in newly_joined) + self.assertTrue(room_id3 not in newly_left) class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): @@ -2651,11 +2651,11 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ - room_membership_for_user_map = self.get_success( + room_membership_for_user_map, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, @@ -2666,10 +2666,11 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left, ) ) - return filtered_sync_room_map + return filtered_sync_room_map, newly_joined, newly_left def test_no_rooms(self) -> None: """ @@ -2680,7 +2681,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=now_token, to_token=now_token, @@ -2745,7 +2746,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, @@ -2768,32 +2769,32 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): join_response["event_id"], ) self.assertEqual(room_id_results[join_room_id].membership, Membership.JOIN) - self.assertEqual(room_id_results[join_room_id].newly_joined, True) - self.assertEqual(room_id_results[join_room_id].newly_left, False) + self.assertTrue(join_room_id in newly_joined) + self.assertTrue(join_room_id not in newly_left) self.assertEqual( room_id_results[invited_room_id].event_id, invite_response["event_id"], ) self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) - self.assertEqual(room_id_results[invited_room_id].newly_joined, False) - self.assertEqual(room_id_results[invited_room_id].newly_left, False) + self.assertTrue(invited_room_id not in newly_joined) + self.assertTrue(invited_room_id not in newly_left) self.assertEqual( room_id_results[ban_room_id].event_id, ban_response["event_id"], ) self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) - 
self.assertEqual(room_id_results[ban_room_id].newly_joined, False) - self.assertEqual(room_id_results[ban_room_id].newly_left, False) + self.assertTrue(ban_room_id not in newly_joined) + self.assertTrue(ban_room_id not in newly_left) self.assertEqual( room_id_results[knock_room_id].event_id, knock_room_membership_state_event.event_id, ) self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) - self.assertEqual(room_id_results[knock_room_id].newly_joined, False) - self.assertEqual(room_id_results[knock_room_id].newly_left, False) + self.assertTrue(knock_room_id not in newly_joined) + self.assertTrue(knock_room_id not in newly_left) def test_only_newly_left_rooms_show_up(self) -> None: """ @@ -2816,7 +2817,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_room2_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room2_token, @@ -2829,8 +2830,8 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): _leave_response2["event_id"], ) # We should *NOT* be `newly_joined` because we are instead `newly_left` - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, True) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 in newly_left) def test_get_kicked_room(self) -> None: """ @@ -2861,7 +2862,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_kick_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, @@ -2878,8 +2879,8 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were not joined at the time # of the `to_token`.
- self.assertEqual(room_id_results[kick_room_id].newly_joined, False) - self.assertEqual(room_id_results[kick_room_id].newly_left, False) + self.assertTrue(kick_room_id not in newly_joined) + self.assertTrue(kick_room_id not in newly_left) def test_state_reset(self) -> None: """ @@ -2961,7 +2962,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_reset_token = self.event_sources.get_current_token() # The function under test - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_reset_token, to_token=after_reset_token, @@ -2978,9 +2979,9 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): # State reset caused us to leave the room and there is no corresponding leave event self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertTrue(room_id1 not in newly_joined) # We should be `newly_left` because we were removed via state reset during the from/to range - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 in newly_left) class FilterRoomsTestCase(HomeserverTestCase): @@ -3012,11 +3013,11 @@ class FilterRoomsTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ - room_membership_for_user_map = self.get_success( + room_membership_for_user_map, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, @@ -3027,10 +3028,11 @@ class FilterRoomsTestCase(HomeserverTestCase): self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left, ) ) - return filtered_sync_room_map + return filtered_sync_room_map, newly_joined, newly_left def _create_dm_room( self, @@ -3174,8 +3176,12 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() + dm_room_ids = self.get_success( + self.sliding_sync_handler.room_lists._get_dm_rooms_for_user(user1_id) + ) + # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3190,6 +3196,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_dm=True, ), after_rooms_token, + dm_room_ids=dm_room_ids, ) ) @@ -3204,6 +3211,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_dm=False, ), after_rooms_token, + dm_room_ids=dm_room_ids, ) ) @@ -3231,7 +3239,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3246,6 +3254,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ 
-3260,6 +3269,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3293,7 +3303,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), # We're using a `from_token` so that the room is considered `newly_left` and # appears in our list of relevant sync rooms @@ -3310,6 +3320,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3324,6 +3335,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3367,7 +3379,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), # We're using a `from_token` so that the room is considered `newly_left` and # appears in our list of relevant sync rooms @@ -3384,6 +3396,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3398,6 +3411,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3440,7 +3454,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), # We're using a `from_token` so that the room is considered `newly_left` and # appears in our list of relevant sync rooms @@ -3457,6 +3471,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3478,6 +3493,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3512,7 +3528,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3527,6 +3543,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3543,6 +3560,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3598,7 +3616,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3613,6 +3631,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3631,6 +3650,7 @@ class 
FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3679,7 +3699,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3694,6 +3714,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3710,6 +3731,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_encrypted=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3739,7 +3761,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3754,6 +3776,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_invite=True, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3768,6 +3791,7 @@ class FilterRoomsTestCase(HomeserverTestCase): is_invite=False, ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3806,7 +3830,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3819,6 +3843,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3831,6 +3856,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3845,6 +3871,7 @@ class FilterRoomsTestCase(HomeserverTestCase): room_types=[None, RoomTypes.SPACE] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3859,6 +3886,7 @@ class FilterRoomsTestCase(HomeserverTestCase): room_types=["org.matrix.foobarbaz"] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3897,7 +3925,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -3910,6 +3938,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3924,6 +3953,7 @@ class FilterRoomsTestCase(HomeserverTestCase): not_room_types=[RoomTypes.SPACE] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3938,6 +3968,7 @@ class FilterRoomsTestCase(HomeserverTestCase): not_room_types=[None, RoomTypes.SPACE] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3953,6 +3984,7 @@ class FilterRoomsTestCase(HomeserverTestCase): room_types=[None], not_room_types=[None] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -3969,6 +4001,7 @@ class 
FilterRoomsTestCase(HomeserverTestCase): room_types=[None, RoomTypes.SPACE], not_room_types=[None] ), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4002,7 +4035,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), # We're using a `from_token` so that the room is considered `newly_left` and # appears in our list of relevant sync rooms @@ -4017,6 +4050,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4029,6 +4063,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4071,7 +4106,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), # We're using a `from_token` so that the room is considered `newly_left` and # appears in our list of relevant sync rooms @@ -4086,6 +4121,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4098,6 +4134,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4131,7 +4168,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4144,6 +4181,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4158,6 +4196,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4207,7 +4246,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4220,6 +4259,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4234,6 +4274,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4284,7 +4325,7 @@ class FilterRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be 
syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4297,6 +4338,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4311,6 +4353,7 @@ class FilterRoomsTestCase(HomeserverTestCase): sync_room_map, SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), after_rooms_token, + dm_room_ids=set(), ) ) @@ -4348,11 +4391,11 @@ class SortRoomsTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ - room_membership_for_user_map = self.get_success( + room_membership_for_user_map, newly_joined, newly_left = self.get_success( self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, @@ -4363,10 +4406,11 @@ class SortRoomsTestCase(HomeserverTestCase): self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left, ) ) - return filtered_sync_room_map + return filtered_sync_room_map, newly_joined, newly_left def test_sort_activity_basic(self) -> None: """ @@ -4387,7 +4431,7 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4468,7 +4512,7 @@ class SortRoomsTestCase(HomeserverTestCase): self.helper.send(room_id3, "activity in room3", tok=user2_tok) # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_rooms_token, to_token=after_rooms_token, @@ -4532,7 +4576,7 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, From 786de8570bf7ff14a8b98dbad6b9b9c18b05faf7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Sep 2024 11:12:54 +0100 Subject: [PATCH 211/332] Speed up fetching partial-state rooms on sliding sync (#17666) Instead of having a large cache of `room_id -> bool` about whether a room is partially stated, replace it with a "fetch the rooms the user is in which are partially-stated" query. This is a lot faster, as the set of partially stated rooms at any point across the whole server is small and so such a query is fast. The main cost of the bulk cache lookup is the CPU time spent looking all the rooms up in the cache.
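To make the trade-off concrete, here is a minimal sketch of the two lookup shapes, assuming a toy in-memory store (`FakeStore` and its data are illustrative stand-ins, not the real datastore API; only the method names mirror the ones this patch touches):

    import asyncio
    from typing import AbstractSet, Dict, Set


    class FakeStore:
        """Toy stand-in for the datastore; only the call shapes matter here."""

        def __init__(self, partial_rooms: Set[str]) -> None:
            self._partial_rooms = partial_rooms

        async def is_partial_state_room_batched(
            self, room_ids: AbstractSet[str]
        ) -> Dict[str, bool]:
            # Old shape: one cache entry consulted per requested room, even
            # though almost every answer is False.
            return {room_id: room_id in self._partial_rooms for room_id in room_ids}

        async def get_partial_rooms(self) -> AbstractSet[str]:
            # New shape: fetch the whole (small) server-wide set once, which
            # can then be cached as a single entry.
            return frozenset(self._partial_rooms)


    async def main() -> None:
        store = FakeStore(partial_rooms={"!p:example.org"})
        room_ids = {f"!r{i}:example.org" for i in range(1000)} | {"!p:example.org"}

        # Old: build and walk a 1001-entry dict.
        flags = await store.is_partial_state_room_batched(room_ids)
        old_filtered = {r for r in room_ids if not flags.get(r)}

        # New: one membership test per room against a tiny set.
        partial = await store.get_partial_rooms()
        new_filtered = {r for r in room_ids if r not in partial}

        assert old_filtered == new_filtered


    asyncio.run(main())

Both approaches produce the same filtered map; the saving comes from skipping the per-room cache walk entirely.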
--- changelog.d/17666.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 28 ++++++--------------- synapse/storage/databases/main/room.py | 26 +++++++++++++++++++ 3 files changed, 35 insertions(+), 20 deletions(-) create mode 100644 changelog.d/17666.misc diff --git a/changelog.d/17666.misc b/changelog.d/17666.misc new file mode 100644 index 0000000000..3550679247 --- /dev/null +++ b/changelog.d/17666.misc @@ -0,0 +1 @@ +Small performance improvement in speeding up sliding sync. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index a77b7ef2c3..8d6d8be44f 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -333,11 +333,7 @@ class SlidingSyncRoomLists: # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). - partial_state_room_map = ( - await self.store.is_partial_state_room_batched( - filtered_sync_room_map.keys() - ) - ) + partial_state_rooms = await self.store.get_partial_rooms() # Since creating the `RoomSyncConfig` takes some work, let's just do it # once. @@ -349,7 +345,7 @@ class SlidingSyncRoomLists: filtered_sync_room_map = { room_id: room for room_id, room in filtered_sync_room_map.items() - if not partial_state_room_map.get(room_id) + if room_id not in partial_state_rooms } all_rooms.update(filtered_sync_room_map) @@ -409,9 +405,7 @@ class SlidingSyncRoomLists: with start_active_span("assemble_room_subscriptions"): # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). - partial_state_room_map = await self.store.is_partial_state_room_batched( - sync_config.room_subscriptions.keys() - ) + partial_state_rooms = await self.store.get_partial_rooms() for ( room_id, @@ -431,7 +425,7 @@ class SlidingSyncRoomLists: # Exclude partially-stated rooms if we must wait for the room to be # fully-stated if room_sync_config.must_await_full_state(self.is_mine_id): - if partial_state_room_map.get(room_id): + if room_id in partial_state_rooms: continue all_rooms.add(room_id) @@ -514,11 +508,7 @@ class SlidingSyncRoomLists: # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). - partial_state_room_map = ( - await self.store.is_partial_state_room_batched( - filtered_sync_room_map.keys() - ) - ) + partial_state_rooms = await self.store.get_partial_rooms() # Since creating the `RoomSyncConfig` takes some work, let's just do it # once. @@ -530,7 +520,7 @@ class SlidingSyncRoomLists: filtered_sync_room_map = { room_id: room for room_id, room in filtered_sync_room_map.items() - if not partial_state_room_map.get(room_id) + if room_id not in partial_state_rooms } all_rooms.update(filtered_sync_room_map) @@ -590,9 +580,7 @@ class SlidingSyncRoomLists: with start_active_span("assemble_room_subscriptions"): # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). 
- partial_state_room_map = await self.store.is_partial_state_room_batched( - sync_config.room_subscriptions.keys() - ) + partial_state_rooms = await self.store.get_partial_rooms() for ( room_id, @@ -624,7 +612,7 @@ class SlidingSyncRoomLists: # Exclude partially-stated rooms if we must wait for the room to be # fully-stated if room_sync_config.must_await_full_state(self.is_mine_id): - if partial_state_room_map.get(room_id): + if room_id in partial_state_rooms: continue all_rooms.add(room_id) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 68b0806041..e0b7b7e194 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1382,6 +1382,30 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): partial_state_rooms = {row[0] for row in rows} return {room_id: room_id in partial_state_rooms for room_id in room_ids} + @cached(max_entries=10000, iterable=True) + async def get_partial_rooms(self) -> AbstractSet[str]: + """Get all rooms that are currently "partial-state". + + This is fast as the set of partially stated rooms at any point across + the whole server is small, and so such a query is fast. This is also + faster than looking up whether a set of room IDs are partially stated + via `is_partial_state_room_batched(...)` because of the sheer amount of + CPU time spent looking all the rooms up in the cache. + """ + + def _get_partial_rooms_for_user_txn( + txn: LoggingTransaction, + ) -> AbstractSet[str]: + sql = """ + SELECT room_id FROM partial_state_rooms + """ + txn.execute(sql) + return {room_id for (room_id,) in txn} + + return await self.db_pool.runInteraction( + "get_partial_rooms_for_user", _get_partial_rooms_for_user_txn + ) + async def get_join_event_id_and_device_lists_stream_id_for_partial_state( self, room_id: str ) -> Tuple[str, int]: @@ -2341,6 +2365,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): self._invalidate_cache_and_stream( txn, self._get_partial_state_servers_at_join, (room_id,) ) + self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms) async def write_partial_state_rooms_join_event_id( self, @@ -2562,6 +2587,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): self._invalidate_cache_and_stream( txn, self._get_partial_state_servers_at_join, (room_id,) ) + self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms) DatabasePool.simple_insert_txn( txn, From a708e1afd0458120cd0a70609b7254ebd634da03 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Sep 2024 11:44:13 +0100 Subject: [PATCH 212/332] Small performance improvements for sliding sync (#17672) A couple of small performance improvements for sliding sync. --- changelog.d/17672.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 15 ++++++++++----- synapse/types/handlers/sliding_sync.py | 8 +++++++- .../client/sliding_sync/test_rooms_meta.py | 19 ++++++++----------- 4 files changed, 26 insertions(+), 17 deletions(-) create mode 100644 changelog.d/17672.misc diff --git a/changelog.d/17672.misc b/changelog.d/17672.misc new file mode 100644 index 0000000000..3550679247 --- /dev/null +++ b/changelog.d/17672.misc @@ -0,0 +1 @@ +Small performance improvement in speeding up sliding sync.
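Of the two improvements in the diff below, one skips sorting when a list requests its full range, and the other stops re-allocating the immutable `HaveSentRoom.never()` marker on every call. A minimal sketch of that singleton idiom, using a frozen dataclass rather than Synapse's attrs/generics machinery (the names here are illustrative, not the real types):

    from dataclasses import dataclass
    from enum import Enum, auto
    from typing import Any, Optional


    class HaveSentRoomFlag(Enum):
        LIVE = auto()
        PREVIOUSLY = auto()
        NEVER = auto()


    @dataclass(frozen=True)
    class HaveSentRoom:
        status: HaveSentRoomFlag
        last_token: Optional[Any]

        @staticmethod
        def never() -> "HaveSentRoom":
            # The value is immutable and identical for every caller, so hand
            # out one module-level instance instead of allocating per call.
            return _HAVE_SENT_ROOM_NEVER


    _HAVE_SENT_ROOM_NEVER = HaveSentRoom(HaveSentRoomFlag.NEVER, None)

    # Every call returns the very same object: no allocation churn on hot paths.
    assert HaveSentRoom.never() is HaveSentRoom.never()

The same trick applies to any frozen value object that is requested far more often than it varies.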
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 8d6d8be44f..165b15c60f 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -350,13 +350,18 @@ class SlidingSyncRoomLists: all_rooms.update(filtered_sync_room_map) - # Sort the list - sorted_room_info = await self.sort_rooms_using_tables( - filtered_sync_room_map, to_token - ) - ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + if list_config.ranges: + if list_config.ranges == [(0, len(filtered_sync_room_map) - 1)]: + # If we are asking for the full range, we don't need to sort the list. + sorted_room_info = list(filtered_sync_room_map.values()) + else: + # Sort the list + sorted_room_info = await self.sort_rooms_using_tables( + filtered_sync_room_map, to_token + ) + for range in list_config.ranges: room_ids_in_list: List[str] = [] diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 84a88bf784..9d934dd563 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -19,6 +19,7 @@ from enum import Enum from typing import ( TYPE_CHECKING, AbstractSet, + Any, Callable, Dict, Final, @@ -703,7 +704,12 @@ class HaveSentRoom(Generic[T]): @staticmethod def never() -> "HaveSentRoom[T]": - return HaveSentRoom(HaveSentRoomFlag.NEVER, None) + # We use a singleton to avoid repeatedly instantiating new `never` + # values. + return _HAVE_SENT_ROOM_NEVER + + +_HAVE_SENT_ROOM_NEVER: HaveSentRoom[Any] = HaveSentRoom(HaveSentRoomFlag.NEVER, None) @attr.s(auto_attribs=True, slots=True, frozen=True) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 4ed49040c1..8ce5e8995e 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -588,19 +588,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ) # Make sure the list includes the rooms in the right order - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - # room1 sorts before room2 because it has the latest event (the - # reaction) - "room_ids": [room_id1, room_id2], - } - ], + self.assertEqual( + len(response_body["lists"]["foo-list"]["ops"]), + 1, response_body["lists"]["foo-list"], ) + op = response_body["lists"]["foo-list"]["ops"][0] + self.assertEqual(op["op"], "SYNC") + self.assertEqual(op["range"], [0, 1]) + # Note that we don't order the ops anymore, so we need to compare sets. + self.assertIncludes(set(op["room_ids"]), {room_id1, room_id2}, exact=True) # The `bump_stamp` for room1 should point at the latest message (not the # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) From e5d07bb0830eea056ea85863bca3d5b093d43a88 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Sep 2024 11:44:37 +0100 Subject: [PATCH 213/332] Fix bump stamp for non-joined rooms (#17674) We should only look for bump stamps in joined rooms, otherwise we should just use the membership stream ordering. 
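Condensed into a self-contained sketch, the rule this fix enforces looks roughly like the following (plain strings and integers stand in for the real `Membership` constants and stream positions; `choose_bump_stamp` is an invented name for illustration):

    from typing import Optional


    def choose_bump_stamp(
        membership: str,
        membership_stream_pos: int,
        last_bump_event_pos: Optional[int],
    ) -> int:
        # Default: the stream position of the user's own membership event.
        bump_stamp = membership_stream_pos

        # Only joined users should have the room "bumped" by later activity;
        # for invites, leaves, bans, etc. the membership event is the anchor.
        if membership == "join" and last_bump_event_pos is not None:
            # Backfilled events carry negative stream orderings, which can't
            # sensibly be compared, so keep the membership position instead.
            if last_bump_event_pos > 0:
                bump_stamp = last_bump_event_pos

        return bump_stamp


    # An invite keeps its own position even if messages arrive afterwards...
    assert choose_bump_stamp("invite", 10, 25) == 10
    # ...while a joined user is bumped by the later event...
    assert choose_bump_stamp("join", 10, 25) == 25
    # ...unless that event was backfilled.
    assert choose_bump_stamp("join", 10, -5) == 10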
--- changelog.d/17674.bugfix | 1 + synapse/handlers/sliding_sync/__init__.py | 38 ++++++++-------- .../client/sliding_sync/test_rooms_meta.py | 45 +++++++++++++++++++ 3 files changed, 66 insertions(+), 18 deletions(-) create mode 100644 changelog.d/17674.bugfix diff --git a/changelog.d/17674.bugfix b/changelog.d/17674.bugfix new file mode 100644 index 0000000000..bbef5005a1 --- /dev/null +++ b/changelog.d/17674.bugfix @@ -0,0 +1 @@ +Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 0f06ffaa11..7f084cb916 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -975,27 +975,29 @@ class SlidingSyncHandler: ) ) - # Figure out the last bump event in the room - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) - ) - # By default, just choose the membership event position bump_stamp = room_membership_for_user_at_to_token.event_pos.stream - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + # Figure out the last bump event in the room if we're in the room. + if room_membership_for_user_at_to_token.membership == Membership.JOIN: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + + # But if we found a bump event, use that instead + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. 
+ if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 8ce5e8995e..aac2e60586 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -755,3 +755,48 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) + + def test_rooms_bump_stamp_invites(self) -> None: + """ + Test that `bump_stamp` is present and points to the membership event, + and not later events, for non-joined rooms + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + ) + + # Invite user1 to the room + invite_response = self.helper.invite(room_id, user2_id, user1_id, tok=user2_tok) + + # More messages happen after the invite + self.helper.send(room_id, "message in room1", tok=user2_tok) + + # We expect the bump_stamp to match the invite. + invite_pos = self.get_success( + self.store.get_position_for_event(invite_response["event_id"]) + ) + + # Doing an SS request should return a `bump_stamp` of the invite event, + # rather than the message that was sent after. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertEqual( + response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream + ) From 5389374ef8a9222bd821c88862ff654e42ef83a4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 9 Sep 2024 04:36:22 -0500 Subject: [PATCH 214/332] Sliding Sync: Speed up incremental sync by avoiding extra work (#17665) Speed up incremental sync by avoiding extra work. We first look at the state delta changes and only fetch and calculate further derived things if they have changed. --- changelog.d/17665.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 151 ++++++-- synapse/rest/client/sync.py | 8 +- synapse/types/handlers/sliding_sync.py | 10 +- .../client/sliding_sync/test_rooms_meta.py | 349 +++++++++++++++++- 5 files changed, 472 insertions(+), 47 deletions(-) create mode 100644 changelog.d/17665.misc diff --git a/changelog.d/17665.misc b/changelog.d/17665.misc new file mode 100644 index 0000000000..28921087a6 --- /dev/null +++ b/changelog.d/17665.misc @@ -0,0 +1 @@ +Speed up incremental Sliding Sync requests by avoiding extra work. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 7f084cb916..444cc32f36 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -44,6 +44,7 @@ from synapse.storage.roommember import ( ) from synapse.types import ( JsonDict, + MutableStateMap, PersistedEventPosition, Requester, RoomStreamToken, @@ -753,26 +754,78 @@ class SlidingSyncHandler: # indicate to the client that a state reset happened. Perhaps we should indicate # this by setting `initial: True` and empty `required_state`. 
- # Check whether the room has a name set - name_state_ids = await self.get_current_state_ids_at( - room_id=room_id, - room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, - state_filter=StateFilter.from_types([(EventTypes.Name, "")]), - to_token=to_token, - ) - name_event_id = name_state_ids.get((EventTypes.Name, "")) - - room_membership_summary: Mapping[str, MemberSummary] - empty_membership_summary = MemberSummary([], 0) - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - # TODO: Figure out how to get the membership summary for left/banned rooms - room_membership_summary = {} + # Get the changes to current state in the token range from the + # `current_state_delta_stream` table. + # + # For incremental syncs, we can do this first to determine if something relevant + # has changed and strategically avoid fetching other costly things. + room_state_delta_id_map: MutableStateMap[str] = {} + name_event_id: Optional[str] = None + membership_changed = False + name_changed = False + avatar_changed = False + if initial: + # Check whether the room has a name set + name_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types([(EventTypes.Name, "")]), + to_token=to_token, + ) + name_event_id = name_state_ids.get((EventTypes.Name, "")) else: - room_membership_summary = await self.store.get_room_summary(room_id) - # TODO: Reverse/rewind back to the `to_token` + assert from_bound is not None + + # TODO: Limit the number of state events we're about to send down + # the room, if its too many we should change this to an + # `initial=True`? + deltas = await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=from_bound, + to_token=to_token.room_key, + ) + for delta in deltas: + # TODO: Handle state resets where event_id is None + if delta.event_id is not None: + room_state_delta_id_map[(delta.event_type, delta.state_key)] = ( + delta.event_id + ) + + if delta.event_type == EventTypes.Member: + membership_changed = True + elif delta.event_type == EventTypes.Name and delta.state_key == "": + name_changed = True + elif ( + delta.event_type == EventTypes.RoomAvatar and delta.state_key == "" + ): + avatar_changed = True + + room_membership_summary: Optional[Mapping[str, MemberSummary]] = None + empty_membership_summary = MemberSummary([], 0) + # We need the room summary for: + # - Always for initial syncs (or the first time we send down the room) + # - When the room has no name, we need `heroes` + # - When the membership has changed so we need to give updated `heroes` and + # `joined_count`/`invited_count`. + # + # Ideally, instead of just looking at `name_changed`, we'd check if the room + # name is not set but this is a good enough approximation that saves us from + # having to pull out the full event. This just means, we're generating the + # summary whenever the room name changes instead of only when it changes to + # `None`. + if initial or name_changed or membership_changed: + # We can't trace the function directly because it's cached and the `@cached` + # decorator doesn't mix with `@trace` yet. 
+ with start_active_span("get_room_summary"): + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: Figure out how to get the membership summary for left/banned rooms + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` # `heroes` are required if the room name is not set. # @@ -786,7 +839,12 @@ class SlidingSyncHandler: # TODO: Should we also check for `EventTypes.CanonicalAlias` # (`m.room.canonical_alias`) as a fallback for the room name? see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 - if name_event_id is None: + # + # We need to fetch the `heroes` if the room name is not set. But we only need to + # get them on initial syncs (or the first time we send down the room) or if the + # membership has changed which may change the heroes. + if name_event_id is None and (initial or (not initial and membership_changed)): + assert room_membership_summary is not None hero_user_ids = extract_heroes_from_room_summary( room_membership_summary, me=user.to_string() ) @@ -904,9 +962,15 @@ class SlidingSyncHandler: # We need this base set of info for the response so let's just fetch it along # with the `required_state` for the room - meta_room_state = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + [ + hero_room_state = [ (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids ] + meta_room_state = list(hero_room_state) + if initial or name_changed: + meta_room_state.append((EventTypes.Name, "")) + if initial or avatar_changed: + meta_room_state.append((EventTypes.RoomAvatar, "")) + state_filter = StateFilter.all() if required_state_filter != StateFilter.all(): state_filter = StateFilter( @@ -929,21 +993,22 @@ class SlidingSyncHandler: else: assert from_bound is not None - # TODO: Limit the number of state events we're about to send down - # the room, if its too many we should change this to an - # `initial=True`? 
- deltas = await self.store.get_current_state_deltas_for_room( - room_id=room_id, - from_token=from_bound, - to_token=to_token.room_key, - ) - # TODO: Filter room state before fetching events - # TODO: Handle state resets where event_id is None events = await self.store.get_events( - [d.event_id for d in deltas if d.event_id] + state_filter.filter_state(room_state_delta_id_map).values() ) room_state = {(s.type, s.state_key): s for s in events.values()} + # If the membership changed and we have to get heroes, get the remaining + # heroes from the state + if hero_user_ids: + hero_membership_state = await self.get_current_state_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types(hero_room_state), + to_token=to_token, + ) + room_state.update(hero_membership_state) + required_room_state: StateMap[EventBase] = {} if required_state_filter != StateFilter.none(): required_room_state = required_state_filter.filter_state(room_state) @@ -1050,6 +1115,20 @@ class SlidingSyncHandler: set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) + joined_count: Optional[int] = None + if initial or membership_changed: + assert room_membership_summary is not None + joined_count = room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count + + invited_count: Optional[int] = None + if initial or membership_changed: + assert room_membership_summary is not None + invited_count = room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count + return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, @@ -1065,12 +1144,8 @@ class SlidingSyncHandler: unstable_expanded_timeline=unstable_expanded_timeline, num_live=num_live, bump_stamp=bump_stamp, - joined_count=room_membership_summary.get( - Membership.JOIN, empty_membership_summary - ).count, - invited_count=room_membership_summary.get( - Membership.INVITE, empty_membership_summary - ).count, + joined_count=joined_count, + invited_count=invited_count, # TODO: These are just dummy values. We could potentially just remove these # since notifications can only really be done correctly on the client anyway # (encrypted rooms). 
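The handler changes above boil down to: classify the state deltas once, then gate each expensive fetch (room summary, heroes, name/avatar events) on the resulting flags. Below is a minimal, self-contained sketch of that classification step, with deliberately simplified types: plain `(event_type, state_key)` tuples instead of Synapse's delta rows, and inlined event-type strings instead of the `EventTypes` constants. It illustrates the pattern, not the actual handler code.

from typing import Iterable, Tuple

# Matrix state event types, inlined here for self-containment.
M_ROOM_MEMBER = "m.room.member"
M_ROOM_NAME = "m.room.name"
M_ROOM_AVATAR = "m.room.avatar"


def classify_state_deltas(
    deltas: Iterable[Tuple[str, str]],
) -> Tuple[bool, bool, bool]:
    """Return (membership_changed, name_changed, avatar_changed).

    On an incremental sync, only these flags decide whether the expensive
    derived data (room summary, heroes, name/avatar events) is recomputed.
    """
    membership_changed = name_changed = avatar_changed = False
    for event_type, state_key in deltas:
        if event_type == M_ROOM_MEMBER:
            membership_changed = True
        elif event_type == M_ROOM_NAME and state_key == "":
            name_changed = True
        elif event_type == M_ROOM_AVATAR and state_key == "":
            avatar_changed = True
    return membership_changed, name_changed, avatar_changed


# A topic change alone triggers none of the refetches...
assert classify_state_deltas([("m.room.topic", "")]) == (False, False, False)
# ...while a membership event marks the summary (heroes/counts) as stale.
assert classify_state_deltas([("m.room.member", "@bob:example.org")])[0]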
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index cc9fbfe546..9e2bf98189 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1011,12 +1011,16 @@ class SlidingSyncRestServlet(RestServlet): for room_id, room_result in rooms.items(): serialized_rooms[room_id] = { "bump_stamp": room_result.bump_stamp, - "joined_count": room_result.joined_count, - "invited_count": room_result.invited_count, "notification_count": room_result.notification_count, "highlight_count": room_result.highlight_count, } + if room_result.joined_count is not None: + serialized_rooms[room_id]["joined_count"] = room_result.joined_count + + if room_result.invited_count is not None: + serialized_rooms[room_id]["invited_count"] = room_result.invited_count + if room_result.name: serialized_rooms[room_id]["name"] = room_result.name diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 9d934dd563..e1b2af7a03 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -197,8 +197,8 @@ class SlidingSyncResult: # Only optional because it won't be included for invite/knock rooms with `stripped_state` num_live: Optional[int] bump_stamp: int - joined_count: int - invited_count: int + joined_count: Optional[int] + invited_count: Optional[int] notification_count: int highlight_count: int @@ -207,6 +207,12 @@ class SlidingSyncResult: # If this is the first time the client is seeing the room, we should not filter it out # under any circumstance. self.initial + # We need to let the client know if any of the info has changed + or self.name is not None + or self.avatar is not None + or bool(self.heroes) + or self.joined_count is not None + or self.invited_count is not None # We need to let the client know if there are any new events or bool(self.required_state) or bool(self.timeline_events) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index aac2e60586..6d2742e25f 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -13,7 +13,7 @@ # import logging -from parameterized import parameterized_class +from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor @@ -67,10 +67,11 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): super().prepare(reactor, clock, hs) - def test_rooms_meta_when_joined(self) -> None: + def test_rooms_meta_when_joined_initial(self) -> None: """ - Test that the `rooms` `name` and `avatar` are included in the response and - reflect the current state of the room when the user is joined to the room. + Test that the `rooms` `name` and `avatar` are included in the initial sync + response and reflect the current state of the room when the user is joined to + the room. 
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -107,6 +108,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the current state of the room + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -129,6 +131,178 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body["rooms"][room_id1].get("is_dm"), ) + def test_rooms_meta_when_joined_incremental_no_change(self) -> None: + """ + Test that the `rooms` `name` and `avatar` aren't included in an incremental sync + response if they haven't changed. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # This needs to be set to one so the `RoomResult` isn't empty and + # the room comes down incremental sync when we send a new message. + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send a message to make the room come down sync + self.helper.send(room_id1, "message in room1", tok=user2_tok) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should only see changed meta info (nothing changed so we shouldn't see any + # of these fields) + self.assertNotIn( + "initial", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "name", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "avatar", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + + @parameterized.expand( + [ + ("in_required_state", True), + ("not_in_required_state", False), + ] + ) + def test_rooms_meta_when_joined_incremental_with_state_change( + self, test_description: str, include_changed_state_in_required_state: bool + ) -> None: + """ + Test that the `rooms` `name` and `avatar` are included in an incremental sync + response if they changed. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": ( + [[EventTypes.Name, ""], [EventTypes.RoomAvatar, ""]] + # Conditionally include the changed state in the + # `required_state` to make sure whether we request it or not, + # the new room name still flows down to the client. + if include_changed_state_in_required_state + else [] + ), + "timeline_limit": 0, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Update the room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID_UPDATED"}, + tok=user2_tok, + ) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should only see changed meta info (the room name and avatar) + self.assertNotIn( + "initial", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["name"], + "my super duper room", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID_UPDATED", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + def test_rooms_meta_when_invited(self) -> None: """ Test that the `rooms` `name` and `avatar` are included in the response and @@ -186,6 +360,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): # This should still reflect the current state of the room even when the user is # invited. + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super duper room", @@ -264,6 +439,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the state of the room at the time of leaving + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -338,6 +514,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): # Room1 has a name so we shouldn't see any `heroes` which the client would use # the calculate the room name themselves. 
+ self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -354,6 +531,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ) # Room2 doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id2]["initial"], True) self.assertIsNone(response_body["rooms"][room_id2].get("name")) self.assertCountEqual( [ @@ -425,6 +603,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Room2 doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ @@ -497,7 +676,8 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, tok=user1_tok) - # Room2 doesn't have a name so we should see `heroes` populated + # Room doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ @@ -527,6 +707,165 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): 0, ) + def test_rooms_meta_heroes_incremental_sync_no_change(self) -> None: + """ + Test that the `rooms` `heroes` aren't included in an incremental sync + response if they haven't changed. + + (when the room doesn't have a room name set) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # This needs to be set to one so the `RoomResult` isn't empty and + # the room comes down incremental sync when we send a new message. + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send a message to make the room come down sync + self.helper.send(room_id, "message in room", tok=user2_tok) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # This is an incremental sync and the second time we have seen this room so it + # isn't `initial` + self.assertNotIn( + "initial", + response_body["rooms"][room_id], + ) + # Room shouldn't have a room name because we're testing the `heroes` field which + # will only has a chance to appear if the room doesn't have a name. 
+        self.assertNotIn(
+            "name",
+            response_body["rooms"][room_id],
+        )
+        # No change to heroes
+        self.assertNotIn(
+            "heroes",
+            response_body["rooms"][room_id],
+        )
+        # No change to member counts
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id],
+        )
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertNotIn(
+            "required_state",
+            response_body["rooms"][room_id],
+        )
+
+    def test_rooms_meta_heroes_incremental_sync_with_membership_change(self) -> None:
+        """
+        Test that the `rooms` `heroes` are included in an incremental sync response if
+        the membership has changed.
+
+        (when the room doesn't have a room name set)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room2",
+            },
+        )
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # User3 joins (membership change)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+
+        # Incremental sync
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # This is an incremental sync and the second time we have seen this room, so it
+        # isn't `initial`
+        self.assertNotIn(
+            "initial",
+            response_body["rooms"][room_id],
+        )
+        # Room shouldn't have a room name because we're testing the `heroes` field which
+        # will only have a chance to appear if the room doesn't have a name.
+        self.assertNotIn(
+            "name",
+            response_body["rooms"][room_id],
+        )
+        # Membership change so we should see heroes and membership counts
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in response_body["rooms"][room_id].get("heroes", [])
+            ],
+            # Heroes shouldn't include the user themselves (we shouldn't see user1)
+            [user2_id, user3_id],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id]["joined_count"],
+            3,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id]["invited_count"],
+            0,
+        )
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertNotIn(
+            "required_state",
+            response_body["rooms"][room_id],
+        )
+
     def test_rooms_bump_stamp(self) -> None:
         """
         Test that `bump_stamp` is present and pointing to relevant events.

From a3c49565fff95cb332ef5f00b6faaf4803b34153 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 9 Sep 2024 11:58:18 +0100
Subject: [PATCH 215/332] Look for bump stamp in the room timeline

This allows us to skip checking the database a lot of the time.
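One detail of the change worth calling out: it leans on Python's `for ... else`, where the `else` arm runs only if the loop finished without hitting `break`, which is exactly the "nothing in the timeline matched, fall back to the database" case. Here is a minimal runnable sketch of that pattern, using a stand-in `Event` type, a stand-in subset of `SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES`, and a `db_lookup` callable in place of the store query:

from dataclasses import dataclass
from typing import Callable, Optional, Sequence

# Stand-in subset of SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES.
BUMP_EVENT_TYPES = {"m.room.create", "m.room.message", "m.room.encrypted"}


@dataclass
class Event:
    type: str
    stream_ordering: int


def find_bump_stamp(
    timeline_events: Sequence[Event],
    db_lookup: Callable[[], Optional[int]],
) -> Optional[int]:
    # Walk the timeline newest-to-oldest, taking the first bump-type event.
    for event in reversed(timeline_events):
        if event.type in BUMP_EVENT_TYPES:
            new_bump_stamp: Optional[int] = event.stream_ordering
            break
    else:
        # No `break` fired: nothing in the timeline qualified, so we pay for
        # the database query.
        new_bump_stamp = db_lookup()
    return new_bump_stamp


timeline = [
    Event("m.room.member", 10),
    Event("m.room.message", 11),
    Event("m.room.member", 12),
]
assert find_bump_stamp(timeline, db_lookup=lambda: None) == 11  # no DB hit needed
assert find_bump_stamp([], db_lookup=lambda: 7) == 7  # falls back to the DB

(Note that the next commit in the series reverts this change, and the commit after that fetches `bump_stamp` from the new sliding sync tables instead.)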
--- synapse/handlers/sliding_sync/__init__.py | 36 +++++++++++++++-------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 444cc32f36..bef3cb38f6 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1045,24 +1045,36 @@ class SlidingSyncHandler: # Figure out the last bump event in the room if we're in the room. if room_membership_for_user_at_to_token.membership == Membership.JOIN: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + new_bump_stamp: Optional[int] = None + + # First check the timeline events we're returning to see if one of + # those matches. We iterate backwards and take the stream ordering + # of the first event that matches the bump event types. + for timeline_event in reversed(timeline_events): + if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + new_bump_stamp = timeline_event.internal_metadata.stream_ordering + break + else: + # If not then we query the DB for it. + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) ) - ) - - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + new_bump_stamp = new_bump_event_pos.stream + # If we found a bump event, use that instead + if new_bump_stamp is not None: # If we've just joined a remote room, then the last bump event may # have been backfilled (and so have a negative stream ordering). # These negative stream orderings can't sensibly be compared, so # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + if new_bump_stamp > 0: + bump_stamp = new_bump_stamp unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) From 5c229415c482f15a7f45b839ceaed0cbc723057c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Sep 2024 11:58:50 +0100 Subject: [PATCH 216/332] Revert "Look for bump stamp in the room timeline" This reverts commit a3c49565fff95cb332ef5f00b6faaf4803b34153. --- synapse/handlers/sliding_sync/__init__.py | 36 ++++++++--------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index bef3cb38f6..444cc32f36 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1045,36 +1045,24 @@ class SlidingSyncHandler: # Figure out the last bump event in the room if we're in the room. if room_membership_for_user_at_to_token.membership == Membership.JOIN: - new_bump_stamp: Optional[int] = None - - # First check the timeline events we're returning to see if one of - # those matches. We iterate backwards and take the stream ordering - # of the first event that matches the bump event types. - for timeline_event in reversed(timeline_events): - if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: - new_bump_stamp = timeline_event.internal_metadata.stream_ordering - break - else: - # If not then we query the DB for it. 
- last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, ) - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - new_bump_stamp = new_bump_event_pos.stream + ) + + # But if we found a bump event, use that instead + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result - # If we found a bump event, use that instead - if new_bump_stamp is not None: # If we've just joined a remote room, then the last bump event may # have been backfilled (and so have a negative stream ordering). # These negative stream orderings can't sensibly be compared, so # instead we use the membership event position. - if new_bump_stamp > 0: - bump_stamp = new_bump_stamp + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) From e1ed959a68f8039130be821c27e82e75b5d59e5f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 9 Sep 2024 10:41:25 -0500 Subject: [PATCH 217/332] Sliding Sync: Get `bump_stamp` from new sliding sync tables because it's faster (#17658) Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster (performance) than flipping through the latest events in the room. --- changelog.d/17658.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 74 ++++-- synapse/storage/databases/main/events.py | 61 ++--- .../storage/databases/main/sliding_sync.py | 40 ++++ tests/storage/test_sliding_sync_tables.py | 213 +++++++++++++++++- 5 files changed, 333 insertions(+), 56 deletions(-) create mode 100644 changelog.d/17658.misc diff --git a/changelog.d/17658.misc b/changelog.d/17658.misc new file mode 100644 index 0000000000..0bdbc1140d --- /dev/null +++ b/changelog.d/17658.misc @@ -0,0 +1 @@ +Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 444cc32f36..7340c6ec05 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1040,29 +1040,67 @@ class SlidingSyncHandler: ) ) - # By default, just choose the membership event position + # Figure out the last bump event in the room + # + # By default, just choose the membership event position for any non-join membership bump_stamp = room_membership_for_user_at_to_token.event_pos.stream - - # Figure out the last bump event in the room if we're in the room. + # If we're joined to the room, we need to find the last bump event before the + # `to_token` if room_membership_for_user_at_to_token.membership == Membership.JOIN: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) + # We can quickly query for the latest bump event in the room using the + # sliding sync tables. 
+ latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( + room_id ) - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result + min_to_token_position = to_token.room_key.stream - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + # If we can rely on the new sliding sync tables and the `bump_stamp` is + # `None`, just fallback to the membership event position. This can happen + # when we've just joined a remote room and all the events are backfilled. + if ( + # FIXME: The background job check can be removed once we bump + # `SCHEMA_COMPAT_VERSION` and run the foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` + # (tracked by https://github.com/element-hq/synapse/issues/17623) + await self.store.have_finished_sliding_sync_background_jobs() + and latest_room_bump_stamp is None + ): + pass + + # The `bump_stamp` stored in the database might be ahead of our token. Since + # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure + # that's before the `to_token` in all scenarios. The only scenario we can be + # sure of is if the `bump_stamp` is totally before the minimum position from + # the token. + # + # We don't need to check if the background update has finished, as if the + # returned bump stamp is not None then it must be up to date. + elif ( + latest_room_bump_stamp is not None + and latest_room_bump_stamp < min_to_token_position + ): + bump_stamp = latest_room_bump_stamp + + # Otherwise, if it's within or after the `to_token`, we need to find the + # last bump event before the `to_token`. + else: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index d423d80efa..e5f63019fd 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -327,6 +327,13 @@ class PersistEventsStore: async with stream_ordering_manager as stream_orderings: for (event, _), stream in zip(events_and_contexts, stream_orderings): + # XXX: We can't rely on `stream_ordering`/`instance_name` being correct + # at this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now being + # persisted again and de-outliered and are being assigned a different + # `stream_ordering` here that won't end up being used. 
+ # `_update_outliers_txn()` will fix this discrepancy (always use the + # `stream_ordering` from the first time it was persisted). event.internal_metadata.stream_ordering = stream event.internal_metadata.instance_name = self._instance_name @@ -470,11 +477,11 @@ class PersistEventsStore: membership_infos_to_insert_membership_snapshots.append( # XXX: We don't use `SlidingSyncMembershipInfoWithEventPos` here # because we're sourcing the event from `events_and_contexts`, we - # can't rely on `stream_ordering`/`instance_name` being correct. We - # could be working with events that were previously persisted as an - # `outlier` with one `stream_ordering` but are now being persisted - # again and de-outliered and assigned a different `stream_ordering` - # that won't end up being used. Since we call + # can't rely on `stream_ordering`/`instance_name` being correct at + # this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now + # being persisted again and de-outliered and assigned a different + # `stream_ordering` that won't end up being used. Since we call # `_calculate_sliding_sync_table_changes()` before # `_update_outliers_txn()` which fixes this discrepancy (always use # the `stream_ordering` from the first time it was persisted), we're @@ -591,11 +598,17 @@ class PersistEventsStore: event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, ) ) - bump_stamp_to_fully_insert = ( - most_recent_bump_event_pos_results[1].stream - if most_recent_bump_event_pos_results is not None - else None - ) + if most_recent_bump_event_pos_results is not None: + _, new_bump_event_pos = most_recent_bump_event_pos_results + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead just leave it as `None` in the table and we will use their + # membership event position as the bump event position in the + # Sliding Sync API. + if new_bump_event_pos.stream > 0: + bump_stamp_to_fully_insert = new_bump_event_pos.stream current_state_ids_map = dict( await self.store.get_partial_filtered_current_state_ids( @@ -2123,31 +2136,26 @@ class PersistEventsStore: if len(events_and_contexts) == 0: return - # We only update the sliding sync tables for non-backfilled events. - # - # Check if the first event is a backfilled event (with a negative - # `stream_ordering`). If one event is backfilled, we assume this whole batch was - # backfilled. - first_event_stream_ordering = events_and_contexts[0][ - 0 - ].internal_metadata.stream_ordering - # This should exist for persisted events - assert first_event_stream_ordering is not None - if first_event_stream_ordering < 0: - return - # Since the list is sorted ascending by `stream_ordering`, the last event should # have the highest `stream_ordering`. max_stream_ordering = events_and_contexts[-1][ 0 ].internal_metadata.stream_ordering + # `stream_ordering` should be assigned for persisted events + assert max_stream_ordering is not None + # Check if the event is a backfilled event (with a negative `stream_ordering`). + # If one event is backfilled, we assume this whole batch was backfilled. + if max_stream_ordering < 0: + # We only update the sliding sync tables for non-backfilled events. 
+            return
+
         max_bump_stamp = None
         for event, _ in reversed(events_and_contexts):
             # Sanity check that all events belong to the same room
             assert event.room_id == room_id

             if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES:
-                # This should exist for persisted events
+                # `stream_ordering` should be assigned for persisted events
                 assert event.internal_metadata.stream_ordering is not None

                 max_bump_stamp = event.internal_metadata.stream_ordering
@@ -2156,11 +2164,6 @@
             # matching bump event which should have the highest `stream_ordering`.
             break

-        # We should have exited earlier if there were no events
-        assert (
-            max_stream_ordering is not None
-        ), "Expected to have a stream_ordering if we have events"
-
         # Handle updating the `sliding_sync_joined_rooms` table.
         #
         txn.execute(
diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py
index dc747d7ac0..83939d10b0 100644
--- a/synapse/storage/databases/main/sliding_sync.py
+++ b/synapse/storage/databases/main/sliding_sync.py
@@ -41,6 +41,46 @@ logger = logging.getLogger(__name__)

 class SlidingSyncStore(SQLBaseStore):
+    async def get_latest_bump_stamp_for_room(
+        self,
+        room_id: str,
+    ) -> Optional[int]:
+        """
+        Get the `bump_stamp` for the room.
+
+        The `bump_stamp` is the `stream_ordering` of the last event according to the
+        `bump_event_types`. This helps clients sort more readily without them needing to
+        pull in a bunch of the timeline to determine the last activity.
+        `bump_event_types` is a thing because, for example, we don't want display name
+        changes to mark the room as unread and bump it to the top. For encrypted rooms,
+        we just have to consider any activity as a bump because we can't see the content
+        and the client has to figure it out for themselves.
+
+        This should only be called where the server is participating
+        in the room (someone local is joined).
+
+        Returns:
+            The `bump_stamp` for the room (which can be `None`).
+        """
+
+        return cast(
+            Optional[int],
+            await self.db_pool.simple_select_one_onecol(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                retcol="bump_stamp",
+                # FIXME: This should be `False` once we bump `SCHEMA_COMPAT_VERSION` and run the
+                # foreground update for
+                # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked
+                # by https://github.com/element-hq/synapse/issues/17623)
+                #
+                # This should be `allow_none=False` in the future because even though
+                # `bump_stamp` itself can be `None`, we should have a row in the
+                # `sliding_sync_joined_rooms` table for any joined room.
+                allow_none=True,
+            ),
+        )
+
     async def persist_per_connection_state(
         self,
         user_id: str,
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
index 621f46fff8..de80ad53cd 100644
--- a/tests/storage/test_sliding_sync_tables.py
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -106,6 +106,12 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
         assert persist_events_store is not None
         self.persist_events_store = persist_events_store

+        persist_controller = self.hs.get_storage_controllers().persistence
+        assert persist_controller is not None
+        self.persist_controller = persist_controller
+
+        self.state_handler = self.hs.get_state_handler()
+
     def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]:
         """
         Return the rows from the `sliding_sync_joined_rooms` table.
@@ -260,10 +266,8 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): ) ) context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None persisted_event, _, _ = self.get_success( - persist_controller.persist_event(invite_event, context) + self.persist_controller.persist_event(invite_event, context) ) self._remote_invite_count += 1 @@ -316,10 +320,8 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): ) ) context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None persisted_event, _, _ = self.get_success( - persist_controller.persist_event(kick_event, context) + self.persist_controller.persist_event(kick_event, context) ) return persisted_event @@ -926,6 +928,201 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): user2_snapshot, ) + def test_joined_room_bump_stamp_backfill(self) -> None: + """ + Test that `bump_stamp` ignores backfilled events, i.e. events with a + negative stream ordering. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote room + creator = "@user:other" + room_id = "!foo:other" + room_version = RoomVersions.V10 + shared_kwargs = { + "room_id": room_id, + "room_version": room_version.identifier, + } + + create_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[], + type=EventTypes.Create, + state_key="", + content={ + # The `ROOM_CREATOR` field could be removed if we used a room + # version > 10 (in favor of relying on `sender`) + EventContentFields.ROOM_CREATOR: creator, + EventContentFields.ROOM_VERSION: room_version.identifier, + }, + sender=creator, + **shared_kwargs, + ) + ) + creator_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[create_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Member, + state_key=creator, + content={"membership": Membership.JOIN}, + sender=creator, + **shared_kwargs, + ) + ) + room_name_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[creator_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Name, + state_key="", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + sender=creator, + **shared_kwargs, + ) + ) + # We add a message event as a valid "bump type" + msg_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[room_name_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=creator, + **shared_kwargs, + ) + ) + invite_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[msg_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.INVITE}, + sender=creator, + **shared_kwargs, + ) + ) + + remote_events_and_contexts = [ + create_tuple, + creator_tuple, + room_name_tuple, + msg_tuple, + invite_tuple, + ] + + # Ensure the local HS knows the room version + self.get_success(self.store.store_room(room_id, creator, False, room_version)) + + # Persist these events as backfilled events. 
+ for event, context in remote_events_and_contexts: + self.get_success( + self.persist_controller.persist_event(event, context, backfilled=True) + ) + + # Now we join the local user to the room. We want to make this feel as close to + # the real `process_remote_join()` as possible but we'd like to avoid some of + # the auth checks that would be done in the real code. + # + # FIXME: The test was originally written using this less-real + # `persist_event(...)` shortcut but it would be nice to use the real remote join + # process in a `FederatingHomeserverTestCase`. + flawed_join_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[invite_tuple[0].event_id], + # This doesn't work correctly to create an `EventContext` that includes + # both of these state events. I assume it's because we're working on our + # local homeserver which has the remote state set as `outlier`. We have + # to create our own EventContext below to get this right. + auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.JOIN}, + sender=user1_id, + **shared_kwargs, + ) + ) + # We have to create our own context to get the state set correctly. If we use + # the `EventContext` from the `flawed_join_tuple`, the `current_state_events` + # table will only have the join event in it which should never happen in our + # real server. + join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id + for e in [create_tuple[0], invite_tuple[0], room_name_tuple[0]] + }, + partial_state=False, + ) + ) + join_event, _join_event_pos, _room_token = self.get_success( + self.persist_controller.persist_event(join_event, join_context) + ) + + # Make sure the tables are populated correctly + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be the last event in the room (the join membership) + event_stream_ordering=join_event.internal_metadata.stream_ordering, + # Since all of the bump events are backfilled, the `bump_stamp` should + # still be `None`. 
(and we will fall back to the user's membership event
+                # position in the Sliding Sync API)
+                bump_stamp=None,
+                room_type=None,
+                # We still pick up state of the room even if it's backfilled
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=join_event.event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=join_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
     @parameterized.expand(
         # Test both an insert and an upsert into the
         # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` to exercise
@@ -1036,11 +1233,9 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
             context = self.get_success(unpersisted_context.persist(event))
             events_to_persist.append((event, context))

-        persist_controller = self.hs.get_storage_controllers().persistence
-        assert persist_controller is not None
         for event, context in events_to_persist:
             self.get_success(
-                persist_controller.persist_event(
+                self.persist_controller.persist_event(
                     event,
                     context,
                 )

From 515c1cc0a1ad7957d5aa2caa8b6423fa93d4193c Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 9 Sep 2024 17:55:59 -0500
Subject: [PATCH 218/332] Sliding Sync: Add comment to explain extra case where
 you can be invited -> banned -> unbanned (#17654)

Add a comment to explain the extra case where you can be invited -> banned ->
unbanned and we want to be able to find the invite event.

Follow-up to https://github.com/element-hq/synapse/pull/17636#discussion_r1738993330
---
 changelog.d/17654.misc                              |  1 +
 synapse/storage/databases/main/events_bg_updates.py | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/17654.misc

diff --git a/changelog.d/17654.misc b/changelog.d/17654.misc
new file mode 100644
index 0000000000..756918e2b2
--- /dev/null
+++ b/changelog.d/17654.misc
@@ -0,0 +1 @@
+Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting.
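To make the new comment concrete: after invite -> ban -> unban, the unban is recorded as a `leave` membership event, so the membership immediately preceding the leave is the ban, not the invite. A toy illustration (not Synapse code) of why the lookup below searches explicitly for the last `invite`/`knock` rather than "the previous membership":

from typing import List, Optional, Tuple


def find_previous_invite_or_knock(
    history: List[Tuple[str, str]],
) -> Optional[Tuple[str, str]]:
    """`history` is (event_id, membership) pairs in stream order."""
    for event_id, membership in reversed(history):
        if membership in ("invite", "knock"):
            return event_id, membership
    return None


# invite -> ban -> unban: the unban is just a `leave` membership event.
history = [("$invite", "invite"), ("$ban", "ban"), ("$unban", "leave")]

# Naively taking the membership immediately before the leave yields the ban...
assert history[-2][1] == "ban"
# ...but the stripped state we want lives on the invite event.
assert find_previous_invite_or_knock(history[:-1]) == ("$invite", "invite")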
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index e20fc4471e..12670e87d2 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -1966,7 +1966,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
             )
             return 0

-        def _find_previous_membership_txn(
+        def _find_previous_invite_or_knock_membership_txn(
             txn: LoggingTransaction, room_id: str, user_id: str, event_id: str
         ) -> Tuple[str, str]:
             # Find the previous invite/knock event before the leave event
@@ -2007,6 +2007,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
                 (
                     room_id,
                     user_id,
+                    # We look explicitly for `invite` and `knock` events instead of
+                    # just their previous membership as someone could have been `invite`
+                    # -> `ban` -> unbanned (`leave`) and we want to find the `invite`
+                    # event, which is where the stripped state is.
                     Membership.INVITE,
                     Membership.KNOCK,
                     event_id,
@@ -2155,8 +2159,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
                     invite_or_knock_event_id,
                     invite_or_knock_membership,
                 ) = await self.db_pool.runInteraction(
-                    "sliding_sync_membership_snapshots_bg_update._find_previous_membership",
-                    _find_previous_membership_txn,
+                    "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn",
+                    _find_previous_invite_or_knock_membership_txn,
                     room_id,
                     user_id,
                     membership_event_id,

From 588e5b521d56f8605e2a029626a5e59bb2b1a40f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 10 Sep 2024 09:52:42 +0100
Subject: [PATCH 219/332] Sliding Sync: Retrieve fewer events from DB in sync
 (#17688)

When using a timeline limit of 1, we end up fetching 2 events from the DB
purely to tell whether the response was "limited" or not. Let's not do that.
---
 changelog.d/17688.misc                        |  1 +
 synapse/handlers/admin.py                     |  1 +
 synapse/handlers/pagination.py                |  2 +
 synapse/handlers/room.py                      |  2 +-
 synapse/handlers/sliding_sync/__init__.py     | 20 +---
 synapse/handlers/sync.py                      | 11 +--
 synapse/storage/databases/main/stream.py      | 93 ++++++++++++-------
 .../sliding_sync/test_rooms_timeline.py       | 48 ++++------
 tests/storage/test_stream.py                  |  2 +-
 9 files changed, 89 insertions(+), 91 deletions(-)
 create mode 100644 changelog.d/17688.misc

diff --git a/changelog.d/17688.misc b/changelog.d/17688.misc
new file mode 100644
index 0000000000..7ba8d48fbe
--- /dev/null
+++ b/changelog.d/17688.misc
@@ -0,0 +1 @@
+Speed up sync by pulling out fewer events from the database.
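The shape of the fix, in miniature: instead of every caller asking for `limit + 1` events purely to infer "limited", the pagination helper reports it alongside the events. A toy sketch of the new contract, where `fetch_rows` stands in for the store query (the real code over-fetches `2 * limit` rows inside the SQL and threads the `limited` boolean through the return values, as the diffs below show):

from typing import Callable, List, Tuple


def paginate(
    fetch_rows: Callable[[int], List[str]],
    limit: int,
) -> Tuple[List[str], bool]:
    # Over-fetch internally (the real query already did) and report whether
    # we hit the limit, instead of making every caller request limit + 1.
    rows = fetch_rows(limit + 1)
    limited = len(rows) > limit
    return rows[:limit], limited


data = ["$a", "$b", "$c"]

events, limited = paginate(lambda n: data[:n], limit=1)
assert events == ["$a"] and limited is True  # caller gets one event plus the flag

events, limited = paginate(lambda n: data[:n], limit=3)
assert events == data and limited is False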
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index c874d22eac..65b3f153da 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -200,6 +200,7 @@ class AdminHandler: ( events, _, + _, ) = await self._store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_key, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 3c44458fa3..4070b74b7a 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -510,6 +510,7 @@ class PaginationHandler: ( events, next_key, + _, ) = await self.store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_token.room_key, @@ -588,6 +589,7 @@ class PaginationHandler: ( events, next_key, + _, ) = await self.store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_token.room_key, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 35c88f1b91..386375d64b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1753,7 +1753,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): ) events = list(room_events) - events.extend(e for evs, _ in room_to_events.values() for e in evs) + events.extend(e for evs, _, _ in room_to_events.values() for e in evs) # We know stream_ordering must be not None here, as its been # persisted, but mypy doesn't know that diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 7340c6ec05..cf368be9d9 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -47,7 +47,6 @@ from synapse.types import ( MutableStateMap, PersistedEventPosition, Requester, - RoomStreamToken, SlidingSyncStreamToken, StateMap, StreamKeyType, @@ -632,7 +631,7 @@ class SlidingSyncHandler: # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) - timeline_events, new_room_key = await pagination_method( + timeline_events, new_room_key, limited = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -640,28 +639,13 @@ class SlidingSyncHandler: from_key=to_bound, to_key=timeline_from_bound, direction=Direction.BACKWARDS, - # We add one so we can determine if there are enough events to saturate - # the limit or not (see `limited`) - limit=room_sync_config.timeline_limit + 1, + limit=room_sync_config.timeline_limit, ) # We want to return the events in ascending order (the last event is the # most recent). timeline_events.reverse() - # Determine our `limited` status based on the timeline. We do this before - # filtering the events so we can accurately determine if there is more to - # paginate even if we filter out some/all events. 
- if len(timeline_events) > room_sync_config.timeline_limit: - limited = True - # Get rid of that extra "+ 1" event because we only used it to determine - # if we hit the limit or not - timeline_events = timeline_events[-room_sync_config.timeline_limit :] - assert timeline_events[0].internal_metadata.stream_ordering - new_room_key = RoomStreamToken( - stream=timeline_events[0].internal_metadata.stream_ordering - 1 - ) - # Make sure we don't expose any events that the client shouldn't see timeline_events = await filter_events_for_client( self.storage_controllers, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 609840bfe9..f4ea90fbd7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -906,7 +906,7 @@ class SyncHandler: # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) - events, end_key = await pagination_method( + events, end_key, limited = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -914,9 +914,7 @@ class SyncHandler: from_key=end_key, to_key=since_key, direction=Direction.BACKWARDS, - # We add one so we can determine if there are enough events to saturate - # the limit or not (see `limited`) - limit=load_limit + 1, + limit=load_limit, ) # We want to return the events in ascending order (the last event is the # most recent). @@ -971,9 +969,6 @@ class SyncHandler: loaded_recents.extend(recents) recents = loaded_recents - if len(events) <= load_limit: - limited = False - break max_repeat -= 1 if len(recents) > timeline_limit: @@ -2608,7 +2603,7 @@ class SyncHandler: newly_joined = room_id in newly_joined_rooms if room_entry: - events, start_key = room_entry + events, start_key, _ = room_entry # We want to return the events in ascending order (the last event is the # most recent). events.reverse() diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 68d4168621..459436e304 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -108,7 +108,7 @@ class PaginateFunction(Protocol): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: ... + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: ... # Used as return values for pagination APIs @@ -679,7 +679,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]: + ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken, bool]]: """Get new room events in stream ordering since `from_key`. Args: @@ -695,6 +695,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): A map from room id to a tuple containing: - list of recent events in the room - stream ordering key for the start of the chunk of events returned. 
+ - a boolean to indicate if there were more events but we hit the limit When Direction.FORWARDS: from_key < x <= to_key, (ascending order) When Direction.BACKWARDS: from_key >= x > to_key, (descending order) @@ -758,7 +759,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `stream_ordering` in the room from the `from_key` in the given `direction` to the `to_key` or `limit`. @@ -773,8 +774,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): limit: Maximum number of events to return Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -798,7 +800,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_key.is_before_or_eq(from_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. elif ( @@ -807,7 +809,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_key.is_before_or_eq(to_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # We can do a quick sanity check to see if any events have been sent in the room # since the earlier token. @@ -826,7 +828,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if not has_changed: # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False order, from_bound, to_bound = generate_pagination_bounds( direction, from_key, to_key @@ -842,7 +844,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): engine=self.database_engine, ) - def f(txn: LoggingTransaction) -> List[_EventDictReturn]: + def f(txn: LoggingTransaction) -> Tuple[List[_EventDictReturn], bool]: sql = f""" SELECT event_id, instance_name, stream_ordering FROM events @@ -854,9 +856,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ txn.execute(sql, (room_id, 2 * limit)) + # Get all the rows and check if we hit the limit. 
+ fetched_rows = txn.fetchall() + limited = len(fetched_rows) >= 2 * limit + rows = [ _EventDictReturn(event_id, None, stream_ordering) - for event_id, instance_name, stream_ordering in txn + for event_id, instance_name, stream_ordering in fetched_rows if _filter_results_by_stream( lower_token=( to_key if direction == Direction.BACKWARDS else from_key @@ -867,10 +873,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): instance_name=instance_name, stream_ordering=stream_ordering, ) - ][:limit] - return rows + ] - rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f) + if len(rows) > limit: + limited = True + + rows = rows[:limit] + return rows, limited + + rows, limited = await self.db_pool.runInteraction( + "get_room_events_stream_for_room", f + ) ret = await self.get_events_as_list( [r.event_id for r in rows], get_prev_content=True @@ -887,7 +900,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # `_paginate_room_events_by_topological_ordering_txn(...)`) next_key = to_key if to_key else from_key - return ret, next_key + return ret, next_key, limited @trace async def get_current_state_delta_membership_changes_for_user( @@ -1191,7 +1204,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if limit == 0: return [], end_token - rows, token = await self.db_pool.runInteraction( + rows, token, _ = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_by_topological_ordering_txn, room_id, @@ -1765,7 +1778,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological=topological_ordering, stream=stream_ordering ) - rows, start_token = self._paginate_room_events_by_topological_ordering_txn( + rows, start_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, before_token, @@ -1775,7 +1788,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events_before = [r.event_id for r in rows] - rows, end_token = self._paginate_room_events_by_topological_ordering_txn( + rows, end_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, after_token, @@ -1947,7 +1960,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: + ) -> Tuple[List[_EventDictReturn], RoomStreamToken, bool]: """Returns list of events before or after a given token. Args: @@ -1962,10 +1975,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): those that match the filter. Returns: - A list of _EventDictReturn and a token that points to the end of the - result set. If no events are returned then the end of the stream has - been reached (i.e. there are no events between `from_token` and - `to_token`), or `limit` is zero. + A list of _EventDictReturn, a token that points to the end of the + result set, and a boolean to indicate if there were more events but + we hit the limit. If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_token` and `to_token`), or `limit` is zero. """ # We can bail early if we're looking forwards, and our `to_key` is already # before our `from_token`. 
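
A minimal, self-contained sketch of the over-fetch pattern used in the hunks above to compute `limited` — a plain list and a predicate stand in for the SQL cursor and the token-bounds filtering, so this is an illustration of the idea rather than Synapse's actual storage code:

    from typing import Callable, List, Tuple, TypeVar

    T = TypeVar("T")

    def fetch_page(
        fetch: Callable[[int], List[T]],
        keep: Callable[[T], bool],
        limit: int,
    ) -> Tuple[List[T], bool]:
        # Pull twice as many rows as requested; either a full over-fetched
        # batch, or filtering still leaving more than `limit` rows, means the
        # caller hit the limit and there may be more events to paginate.
        fetched = fetch(2 * limit)
        limited = len(fetched) >= 2 * limit
        rows = [row for row in fetched if keep(row)]
        if len(rows) > limit:
            limited = True
        return rows[:limit], limited

    # 10 rows available, keep only the even ones, page size 3:
    page, limited = fetch_page(lambda n: list(range(10))[:n], lambda x: x % 2 == 0, 3)
    print(page, limited)  # [0, 2, 4] True

Reporting truncation from the storage layer like this is what lets the sync handler above drop its old `limit=load_limit + 1` trick.
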
@@ -1975,7 +1989,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_token.is_before_or_eq(from_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False # Or vice-versa, if we're looking backwards and our `from_token` is already before # our `to_token`. elif ( @@ -1984,7 +1998,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_token.is_before_or_eq(to_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False args: List[Any] = [room_id] @@ -2007,6 +2021,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): args.extend(filter_args) # We fetch more events as we'll filter the result set + requested_limit = int(limit) * 2 args.append(int(limit) * 2) select_keywords = "SELECT" @@ -2071,10 +2086,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): } txn.execute(sql, args) + # Get all the rows and check if we hit the limit. + fetched_rows = txn.fetchall() + limited = len(fetched_rows) >= requested_limit + # Filter the result set. rows = [ _EventDictReturn(event_id, topological_ordering, stream_ordering) - for event_id, instance_name, topological_ordering, stream_ordering in txn + for event_id, instance_name, topological_ordering, stream_ordering in fetched_rows if _filter_results( lower_token=( to_token if direction == Direction.BACKWARDS else from_token @@ -2086,7 +2105,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological_ordering=topological_ordering, stream_ordering=stream_ordering, ) - ][:limit] + ] + + if len(rows) > limit: + limited = True + + rows = rows[:limit] if rows: assert rows[-1].topological_ordering is not None @@ -2097,7 +2121,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token - return rows, next_token + return rows, next_token, limited @trace @tag_args @@ -2110,7 +2134,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in the room from the `from_key` in the given `direction` to the `to_key` or @@ -2127,8 +2151,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_filter: If provided filters the events to those that match the filter. Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -2152,7 +2177,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. 
elif ( @@ -2162,9 +2187,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False - rows, token = await self.db_pool.runInteraction( + rows, token, limited = await self.db_pool.runInteraction( "paginate_room_events_by_topological_ordering", self._paginate_room_events_by_topological_ordering_txn, room_id, @@ -2179,7 +2204,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [r.event_id for r in rows], get_prev_content=True ) - return events, token + return events, token, limited @cached() async def get_id_for_instance(self, instance_name: str) -> int: diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index 0e027ff39d..2293994793 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -22,7 +22,7 @@ import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.types import StreamToken, StrSequence +from synapse.types import StrSequence from synapse.util import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -149,16 +149,10 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): user2_tok = self.login(user2_id, "pass") room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity1", tok=user2_tok) - self.helper.send(room_id1, "activity2", tok=user2_tok) + event_response1 = self.helper.send(room_id1, "activity1", tok=user2_tok) + event_response2 = self.helper.send(room_id1, "activity2", tok=user2_tok) event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok) - event_pos3 = self.get_success( - self.store.get_position_for_event(event_response3["event_id"]) - ) event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok) - event_pos4 = self.get_success( - self.store.get_position_for_event(event_response4["event_id"]) - ) event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok) user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) @@ -196,27 +190,23 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): ) # Check to make sure the `prev_batch` points at the right place - prev_batch_token = self.get_success( - StreamToken.from_string( - self.store, response_body["rooms"][room_id1]["prev_batch"] - ) + prev_batch_token = response_body["rooms"][room_id1]["prev_batch"] + + # If we use the `prev_batch` token to look backwards we should see + # `event3` and older next. 
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{room_id1}/messages?from={prev_batch_token}&dir=b&limit=3",
+ access_token=user1_tok,
 )
- prev_batch_room_stream_token_serialized = self.get_success(
- prev_batch_token.room_key.to_string(self.store)
- )
- # If we use the `prev_batch` token to look backwards, we should see `event3`
- # next so make sure the token encompasses it
- self.assertEqual(
- event_pos3.persisted_after(prev_batch_token.room_key),
- False,
- f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
- )
- # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
- # anymore since it was just returned in this response.
- self.assertEqual(
- event_pos4.persisted_after(prev_batch_token.room_key),
- True,
- f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
+ self.assertEqual(channel.code, 200, channel.json_body)
+ self.assertListEqual(
+ [
+ event_response3["event_id"],
+ event_response2["event_id"],
+ event_response1["event_id"],
+ ],
+ [ev["event_id"] for ev in channel.json_body["chunk"]],
 )

 # With no `from_token` (initial sync), it's all historical since there is no
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 7b7590da76..837eb434aa 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -147,7 +147,7 @@ class PaginationTestCase(HomeserverTestCase):

 def _filter_messages(self, filter: JsonDict) -> List[str]:
 """Make a request to /messages with a filter, returns the chunk of events."""

- events, next_key = self.get_success(
+ events, next_key, _ = self.get_success(
 self.hs.get_datastores().main.paginate_room_events_by_topological_ordering(
 room_id=self.room_id,
 from_key=self.from_token.room_key,

From 9689ac3294c4e22ddbf1093c171b57e39810e734 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 10 Sep 2024 10:20:30 +0100
Subject: [PATCH 220/332] Sliding Sync: Look for `bump_stamp` in the room
 timeline (#17684)

This allows us to skip checking the database a lot of the time.

---------

Co-authored-by: Eric Eastwood
---
 changelog.d/17684.misc | 1 +
 synapse/handlers/sliding_sync/__init__.py | 148 ++++++++++++++--------
 2 files changed, 96 insertions(+), 53 deletions(-)
 create mode 100644 changelog.d/17684.misc

diff --git a/changelog.d/17684.misc b/changelog.d/17684.misc
new file mode 100644
index 0000000000..ecfb040a5f
--- /dev/null
+++ b/changelog.d/17684.misc
@@ -0,0 +1 @@
+Speed up sliding sync by reducing number of database calls.
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
index cf368be9d9..b097ac57a2 100644
--- a/synapse/handlers/sliding_sync/__init__.py
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -1031,60 +1031,13 @@ class SlidingSyncHandler:
 # If we're joined to the room, we need to find the last bump event before the
 # `to_token`
 if room_membership_for_user_at_to_token.membership == Membership.JOIN:
- # We can quickly query for the latest bump event in the room using the
- # sliding sync tables.
- latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room(
- room_id
+ # Try and get a bump stamp, if not we just fall back to the
+ # membership token.
+ new_bump_stamp = await self._get_bump_stamp( + room_id, to_token, timeline_events ) - - min_to_token_position = to_token.room_key.stream - - # If we can rely on the new sliding sync tables and the `bump_stamp` is - # `None`, just fallback to the membership event position. This can happen - # when we've just joined a remote room and all the events are backfilled. - if ( - # FIXME: The background job check can be removed once we bump - # `SCHEMA_COMPAT_VERSION` and run the foreground update for - # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` - # (tracked by https://github.com/element-hq/synapse/issues/17623) - await self.store.have_finished_sliding_sync_background_jobs() - and latest_room_bump_stamp is None - ): - pass - - # The `bump_stamp` stored in the database might be ahead of our token. Since - # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure - # that's before the `to_token` in all scenarios. The only scenario we can be - # sure of is if the `bump_stamp` is totally before the minimum position from - # the token. - # - # We don't need to check if the background update has finished, as if the - # returned bump stamp is not None then it must be up to date. - elif ( - latest_room_bump_stamp is not None - and latest_room_bump_stamp < min_to_token_position - ): - bump_stamp = latest_room_bump_stamp - - # Otherwise, if it's within or after the `to_token`, we need to find the - # last bump event before the `to_token`. - else: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) - ) - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + if new_bump_stamp is not None: + bump_stamp = new_bump_stamp unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) @@ -1174,3 +1127,92 @@ class SlidingSyncHandler: notification_count=0, highlight_count=0, ) + + @trace + async def _get_bump_stamp( + self, room_id: str, to_token: StreamToken, timeline: List[EventBase] + ) -> Optional[int]: + """Get a bump stamp for the room, if we have a bump event + + Args: + room_id + to_token: The upper bound of token to return + timeline: The list of events we have fetched. + """ + + # First check the timeline events we're returning to see if one of + # those matches. We iterate backwards and take the stream ordering + # of the first event that matches the bump event types. + for timeline_event in reversed(timeline): + if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + new_bump_stamp = timeline_event.internal_metadata.stream_ordering + + # All persisted events have a stream ordering + assert new_bump_stamp is not None + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_stamp > 0: + return new_bump_stamp + + # We can quickly query for the latest bump event in the room using the + # sliding sync tables. 
+ latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( + room_id + ) + + min_to_token_position = to_token.room_key.stream + + # If we can rely on the new sliding sync tables and the `bump_stamp` is + # `None`, just fallback to the membership event position. This can happen + # when we've just joined a remote room and all the events are backfilled. + if ( + # FIXME: The background job check can be removed once we bump + # `SCHEMA_COMPAT_VERSION` and run the foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` + # (tracked by https://github.com/element-hq/synapse/issues/17623) + await self.store.have_finished_sliding_sync_background_jobs() + and latest_room_bump_stamp is None + ): + return None + + # The `bump_stamp` stored in the database might be ahead of our token. Since + # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure + # that's before the `to_token` in all scenarios. The only scenario we can be + # sure of is if the `bump_stamp` is totally before the minimum position from + # the token. + # + # We don't need to check if the background update has finished, as if the + # returned bump stamp is not None then it must be up to date. + elif ( + latest_room_bump_stamp is not None + and latest_room_bump_stamp < min_to_token_position + ): + if latest_room_bump_stamp > 0: + return latest_room_bump_stamp + else: + return None + + # Otherwise, if it's within or after the `to_token`, we need to find the + # last bump event before the `to_token`. + else: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + return new_bump_event_pos.stream + + return None From b3047f3f17187a319c6c2d6145917223b0f7bc84 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Sep 2024 10:22:46 +0100 Subject: [PATCH 221/332] Sliding sync: various fixups to the sliding sync joined room background job (#17673) Follow-up to #17652, https://github.com/element-hq/synapse/pull/17641, https://github.com/element-hq/synapse/pull/17634, https://github.com/element-hq/synapse/pull/17631 and https://github.com/element-hq/synapse/pull/17632 to fix-up https://github.com/element-hq/synapse/pull/17512 --- changelog.d/17673.misc | 1 + .../databases/main/events_bg_updates.py | 27 ++++++++++++------- 2 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17673.misc diff --git a/changelog.d/17673.misc b/changelog.d/17673.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17673.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. 
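
A minimal, self-contained sketch of the timeline-first lookup in the `_get_bump_stamp` helper introduced in the previous patch — the `Event` shape and the bump-event types below are illustrative stand-ins for Synapse's real `EventBase` and `SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES`, not its actual classes:

    from dataclasses import dataclass
    from typing import List, Optional

    BUMP_EVENT_TYPES = {"m.room.message", "m.room.encrypted"}  # illustrative subset

    @dataclass
    class Event:
        type: str
        stream_ordering: int  # negative values mean the event was backfilled

    def bump_stamp_from_timeline(timeline: List[Event]) -> Optional[int]:
        # Walk the events we already fetched, newest first: the first bump-type
        # event with a positive (non-backfilled) stream ordering yields the
        # bump stamp without any extra database query.
        for event in reversed(timeline):
            if event.type in BUMP_EVENT_TYPES and event.stream_ordering > 0:
                return event.stream_ordering
        return None  # caller falls back to the database lookup

    timeline = [Event("m.room.member", 3), Event("m.room.message", 5)]
    print(bump_stamp_from_timeline(timeline))  # 5

Only when the timeline holds no usable bump event does the helper fall back to the sliding sync tables or a full search before the token, as in the database branches above.
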
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 12670e87d2..b3244f7457 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1595,17 +1595,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # starve disk usage while this goes on. # # We upsert in case we have to run this multiple times. - # - # The `WHERE TRUE` clause is to avoid "Parsing Ambiguity" txn.execute( """ INSERT INTO sliding_sync_joined_rooms_to_recalculate (room_id) - SELECT room_id FROM rooms WHERE ? + SELECT DISTINCT room_id FROM local_current_membership + WHERE membership = 'join' ON CONFLICT (room_id) DO NOTHING; """, - (True,), ) await self.db_pool.runInteraction( @@ -1689,7 +1687,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS if not current_state_ids_map: continue - fetched_events = await self.get_events(current_state_ids_map.values()) + try: + fetched_events = await self.get_events(current_state_ids_map.values()) + except (DatabaseCorruptionError, InvalidEventError) as e: + logger.warning( + "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", + room_id, + e, + ) + continue current_state_map: StateMap[EventBase] = { state_key: fetched_events[event_id] @@ -1722,10 +1728,13 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS + "given we pulled the room out of `current_state_events`" ) most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream - assert most_recent_event_stream_ordering > 0, ( - "We should have at-least one event in the room (our own join membership event for example) " - + "that isn't backfilled (negative `stream_ordering`) if we are joined to the room." - ) + + # The `most_recent_event_stream_ordering` should be positive, + # however there are (very rare) rooms where that is not the case in + # the matrix.org database. It's not clear how they got into that + # state, but does mean that we cannot assert that the stream + # ordering is indeed positive. + # Figure out the latest `bump_stamp` in the room. This could be `None` for a # federated room you just joined where all of events are still `outliers` or # backfilled history. In the Sliding Sync API, we default to the user's From a193d4a1b55f2289d51b81c431dd88dde30c950e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:26 +0100 Subject: [PATCH 222/332] Bump authlib from 1.3.1 to 1.3.2 (#17679) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 85950ff707..c0059de0a5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -35,13 +35,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "authlib" -version = "1.3.1" +version = "1.3.2" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = true python-versions = ">=3.8" files = [ - {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"}, - {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"}, + {file = "Authlib-1.3.2-py2.py3-none-any.whl", hash = "sha256:ede026a95e9f5cdc2d4364a52103f5405e75aa156357e831ef2bfd0bc5094dfc"}, + {file = "authlib-1.3.2.tar.gz", hash = "sha256:4b16130117f9eb82aa6eec97f6dd4673c3f960ac0283ccdae2897ee4bc030ba2"}, ] [package.dependencies] From cd24bc2f36a4558fdc11e5bdeb67065faf638251 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:34 +0100 Subject: [PATCH 223/332] Bump ruff from 0.6.2 to 0.6.4 (#17680) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index c0059de0a5..7687ae1364 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2268,29 +2268,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.2" +version = "0.6.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.2-py3-none-linux_armv6l.whl", hash = "sha256:5c8cbc6252deb3ea840ad6a20b0f8583caab0c5ef4f9cca21adc5a92b8f79f3c"}, - {file = "ruff-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:17002fe241e76544448a8e1e6118abecbe8cd10cf68fde635dad480dba594570"}, - {file = "ruff-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3dbeac76ed13456f8158b8f4fe087bf87882e645c8e8b606dd17b0b66c2c1158"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:094600ee88cda325988d3f54e3588c46de5c18dae09d683ace278b11f9d4d534"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:316d418fe258c036ba05fbf7dfc1f7d3d4096db63431546163b472285668132b"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d72b8b3abf8a2d51b7b9944a41307d2f442558ccb3859bbd87e6ae9be1694a5d"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2aed7e243be68487aa8982e91c6e260982d00da3f38955873aecd5a9204b1d66"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d371f7fc9cec83497fe7cf5eaf5b76e22a8efce463de5f775a1826197feb9df8"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f310d63af08f583363dfb844ba8f9417b558199c58a5999215082036d795a1"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db6880c53c56addb8638fe444818183385ec85eeada1d48fc5abe045301b2f1"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1175d39faadd9a50718f478d23bfc1d4da5743f1ab56af81a2b6caf0a2394f23"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939f9c86d51635fe486585389f54582f0d65b8238e08c327c1534844b3bb9a"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0d62ca91219f906caf9b187dea50d17353f15ec9bb15aae4a606cd697b49b4c"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7438a7288f9d67ed3c8ce4d059e67f7ed65e9fe3aa2ab6f5b4b3610e57e3cb56"}, - {file = "ruff-0.6.2-py3-none-win32.whl", hash = "sha256:279d5f7d86696df5f9549b56b9b6a7f6c72961b619022b5b7999b15db392a4da"}, - {file = 
"ruff-0.6.2-py3-none-win_amd64.whl", hash = "sha256:d9f3469c7dd43cd22eb1c3fc16926fb8258d50cb1b216658a07be95dd117b0f2"}, - {file = "ruff-0.6.2-py3-none-win_arm64.whl", hash = "sha256:f28fcd2cd0e02bdf739297516d5643a945cc7caf09bd9bcb4d932540a5ea4fa9"}, - {file = "ruff-0.6.2.tar.gz", hash = "sha256:239ee6beb9e91feb8e0ec384204a763f36cb53fb895a1a364618c6abb076b3be"}, + {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, + {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, + {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, + {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, + {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, + {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, + {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, ] [[package]] @@ -3104,4 +3104,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "2bf09e2b68f3abd1a0f9ff2227eb3026ac3d034845acfc120d0b1cb8167ea43b" +content-hash = "26ff23a6cafd8593141cb3d54d7b1e94328a02b863d347578d2b6e666ee2bc93" diff --git a/pyproject.toml b/pyproject.toml index 69a82b8e1c..e93179c9e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. 
Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.6.2" +ruff = "0.6.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 2efed1d4fb925332438f9edfb939bbe3a92cb538 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:45 +0100 Subject: [PATCH 224/332] Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907 (#17681) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7687ae1364..242d28877a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2823,13 +2823,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "71.1.0.20240818" +version = "74.1.0.20240907" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-71.1.0.20240818.tar.gz", hash = "sha256:f62eaffaa39774462c65fbb49368c4dc1d91a90a28371cb14e1af090ff0e41e3"}, - {file = "types_setuptools-71.1.0.20240818-py3-none-any.whl", hash = "sha256:c4f95302f88369ac0ac46c67ddbfc70c6c4dbbb184d9fed356244217a2934025"}, + {file = "types-setuptools-74.1.0.20240907.tar.gz", hash = "sha256:0abdb082552ca966c1e5fc244e4853adc62971f6cd724fb1d8a3713b580e5a65"}, + {file = "types_setuptools-74.1.0.20240907-py3-none-any.whl", hash = "sha256:15b38c8e63ca34f42f6063ff4b1dd662ea20086166d5ad6a102e670a52574120"}, ] [[package]] From d8b926d323565a1b3097da3beff558ce3403724c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:54 +0100 Subject: [PATCH 225/332] Bump idna from 3.7 to 3.8 (#17682) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 242d28877a..3f2f815d8b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -608,13 +608,13 @@ idna = ">=2.5" [[package]] name = "idna" -version = "3.7" +version = "3.8" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, ] [[package]] From 59bcbcec0a801ba0a2015d4e5bee1b5324619795 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 10 Sep 2024 08:42:01 -0600 Subject: [PATCH 226/332] 1.115.0rc1 --- CHANGES.md | 50 +++++++++++++++++++++++++++++++++++++++ changelog.d/17407.misc | 1 - changelog.d/17509.feature | 1 - changelog.d/17512.misc | 1 - changelog.d/17590.doc | 1 - changelog.d/17594.doc | 1 - changelog.d/17599.misc | 1 - changelog.d/17600.misc | 1 - changelog.d/17607.bugfix | 1 - changelog.d/17620.misc | 1 - changelog.d/17626.bugfix | 1 - changelog.d/17629.misc | 1 - changelog.d/17630.misc | 1 - changelog.d/17631.misc | 1 - changelog.d/17632.misc | 1 - changelog.d/17633.misc | 1 - changelog.d/17634.misc | 1 - changelog.d/17635.misc | 1 - changelog.d/17636.misc | 1 - 
changelog.d/17641.misc | 1 - changelog.d/17643.misc | 1 - changelog.d/17649.misc | 1 - changelog.d/17650.removal | 1 - changelog.d/17654.misc | 1 - changelog.d/17655.misc | 1 - changelog.d/17658.misc | 1 - changelog.d/17665.misc | 1 - changelog.d/17666.misc | 1 - changelog.d/17670.misc | 1 - changelog.d/17672.misc | 1 - changelog.d/17673.misc | 1 - changelog.d/17674.bugfix | 1 - changelog.d/17684.misc | 1 - changelog.d/17688.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 36 files changed, 57 insertions(+), 34 deletions(-) delete mode 100644 changelog.d/17407.misc delete mode 100644 changelog.d/17509.feature delete mode 100644 changelog.d/17512.misc delete mode 100644 changelog.d/17590.doc delete mode 100644 changelog.d/17594.doc delete mode 100644 changelog.d/17599.misc delete mode 100644 changelog.d/17600.misc delete mode 100644 changelog.d/17607.bugfix delete mode 100644 changelog.d/17620.misc delete mode 100644 changelog.d/17626.bugfix delete mode 100644 changelog.d/17629.misc delete mode 100644 changelog.d/17630.misc delete mode 100644 changelog.d/17631.misc delete mode 100644 changelog.d/17632.misc delete mode 100644 changelog.d/17633.misc delete mode 100644 changelog.d/17634.misc delete mode 100644 changelog.d/17635.misc delete mode 100644 changelog.d/17636.misc delete mode 100644 changelog.d/17641.misc delete mode 100644 changelog.d/17643.misc delete mode 100644 changelog.d/17649.misc delete mode 100644 changelog.d/17650.removal delete mode 100644 changelog.d/17654.misc delete mode 100644 changelog.d/17655.misc delete mode 100644 changelog.d/17658.misc delete mode 100644 changelog.d/17665.misc delete mode 100644 changelog.d/17666.misc delete mode 100644 changelog.d/17670.misc delete mode 100644 changelog.d/17672.misc delete mode 100644 changelog.d/17673.misc delete mode 100644 changelog.d/17674.bugfix delete mode 100644 changelog.d/17684.misc delete mode 100644 changelog.d/17688.misc diff --git a/CHANGES.md b/CHANGES.md index d3cec9cc15..227129d00a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,53 @@ +# Synapse 1.115.0rc1 (2024-09-10) + +### Features + +- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509)) + +### Bugfixes + +- Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. ([\#17607](https://github.com/element-hq/synapse/issues/17607)) +- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) +- Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. ([\#17674](https://github.com/element-hq/synapse/issues/17674)) + +### Improved Documentation + +- Clarify that the admin api resource is only loaded on the main process and not workers. ([\#17590](https://github.com/element-hq/synapse/issues/17590)) +- Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). 
([\#17594](https://github.com/element-hq/synapse/issues/17594)) + +### Deprecations and Removals + +- Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. ([\#17650](https://github.com/element-hq/synapse/issues/17650)) + +### Internal Changes + +- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673)) +- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631)) +- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600)) +- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620), [\#17643](https://github.com/element-hq/synapse/issues/17643)) +- Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. ([\#17629](https://github.com/element-hq/synapse/issues/17629)) +- Use new database tables for sliding sync. ([\#17630](https://github.com/element-hq/synapse/issues/17630), [\#17649](https://github.com/element-hq/synapse/issues/17649)) +- Prevent duplicate tags being added to Sliding Sync traces. ([\#17655](https://github.com/element-hq/synapse/issues/17655)) +- Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. ([\#17658](https://github.com/element-hq/synapse/issues/17658)) +- Speed up incremental Sliding Sync requests by avoiding extra work. ([\#17665](https://github.com/element-hq/synapse/issues/17665)) +- Small performance improvement in speeding up sliding sync. ([\#17666](https://github.com/element-hq/synapse/issues/17666), [\#17670](https://github.com/element-hq/synapse/issues/17670), [\#17672](https://github.com/element-hq/synapse/issues/17672)) +- Speed up sliding sync by reducing number of database calls. ([\#17684](https://github.com/element-hq/synapse/issues/17684)) +- Speed up sync by pulling out fewer events from the database. ([\#17688](https://github.com/element-hq/synapse/issues/17688)) + + + +### Updates to locked dependencies + +* Bump authlib from 1.3.1 to 1.3.2. ([\#17679](https://github.com/element-hq/synapse/issues/17679)) +* Bump idna from 3.7 to 3.8. ([\#17682](https://github.com/element-hq/synapse/issues/17682)) +* Bump ruff from 0.6.2 to 0.6.4. ([\#17680](https://github.com/element-hq/synapse/issues/17680)) +* Bump towncrier from 24.7.1 to 24.8.0. ([\#17645](https://github.com/element-hq/synapse/issues/17645)) +* Bump twisted from 24.7.0rc1 to 24.7.0. 
([\#17647](https://github.com/element-hq/synapse/issues/17647)) +* Bump types-pillow from 10.2.0.20240520 to 10.2.0.20240822. ([\#17644](https://github.com/element-hq/synapse/issues/17644)) +* Bump types-psycopg2 from 2.9.21.20240417 to 2.9.21.20240819. ([\#17646](https://github.com/element-hq/synapse/issues/17646)) +* Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907. ([\#17681](https://github.com/element-hq/synapse/issues/17681)) + # Synapse 1.114.0 (2024-09-02) This release enables support for diff --git a/changelog.d/17407.misc b/changelog.d/17407.misc deleted file mode 100644 index 9ed6e61a5b..0000000000 --- a/changelog.d/17407.misc +++ /dev/null @@ -1 +0,0 @@ -MSC3861: load the issuer and account management URLs from OIDC discovery. diff --git a/changelog.d/17509.feature b/changelog.d/17509.feature deleted file mode 100644 index 6d639ceb98..0000000000 --- a/changelog.d/17509.feature +++ /dev/null @@ -1 +0,0 @@ -Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. diff --git a/changelog.d/17512.misc b/changelog.d/17512.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17512.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17590.doc b/changelog.d/17590.doc deleted file mode 100644 index eced3d96cb..0000000000 --- a/changelog.d/17590.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify that the admin api resource is only loaded on the main process and not workers. \ No newline at end of file diff --git a/changelog.d/17594.doc b/changelog.d/17594.doc deleted file mode 100644 index 95b0042005..0000000000 --- a/changelog.d/17594.doc +++ /dev/null @@ -1 +0,0 @@ -Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). diff --git a/changelog.d/17599.misc b/changelog.d/17599.misc deleted file mode 100644 index 2f81356d12..0000000000 --- a/changelog.d/17599.misc +++ /dev/null @@ -1 +0,0 @@ -Store sliding sync per-connection state in the database. diff --git a/changelog.d/17600.misc b/changelog.d/17600.misc deleted file mode 100644 index a81c67f6d1..0000000000 --- a/changelog.d/17600.misc +++ /dev/null @@ -1 +0,0 @@ -Make the sliding sync `PerConnectionState` class immutable. diff --git a/changelog.d/17607.bugfix b/changelog.d/17607.bugfix deleted file mode 100644 index 74201135b6..0000000000 --- a/changelog.d/17607.bugfix +++ /dev/null @@ -1 +0,0 @@ -Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. \ No newline at end of file diff --git a/changelog.d/17620.misc b/changelog.d/17620.misc deleted file mode 100644 index f583cdcb38..0000000000 --- a/changelog.d/17620.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `isort` and `black with `ruff`. diff --git a/changelog.d/17626.bugfix b/changelog.d/17626.bugfix deleted file mode 100644 index 1dbb2a2f45..0000000000 --- a/changelog.d/17626.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix authenticated media responses using a wrong limit when following redirects over federation. 
diff --git a/changelog.d/17629.misc b/changelog.d/17629.misc deleted file mode 100644 index 1eb46b2c68..0000000000 --- a/changelog.d/17629.misc +++ /dev/null @@ -1 +0,0 @@ -Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. diff --git a/changelog.d/17630.misc b/changelog.d/17630.misc deleted file mode 100644 index ed1bf6bd55..0000000000 --- a/changelog.d/17630.misc +++ /dev/null @@ -1 +0,0 @@ -Use new database tables for sliding sync. diff --git a/changelog.d/17631.misc b/changelog.d/17631.misc deleted file mode 100644 index 2f81356d12..0000000000 --- a/changelog.d/17631.misc +++ /dev/null @@ -1 +0,0 @@ -Store sliding sync per-connection state in the database. diff --git a/changelog.d/17632.misc b/changelog.d/17632.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17632.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17633.misc b/changelog.d/17633.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17633.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17634.misc b/changelog.d/17634.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17634.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17635.misc b/changelog.d/17635.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17635.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17636.misc b/changelog.d/17636.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17636.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17641.misc b/changelog.d/17641.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17641.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17643.misc b/changelog.d/17643.misc deleted file mode 100644 index f583cdcb38..0000000000 --- a/changelog.d/17643.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `isort` and `black with `ruff`. diff --git a/changelog.d/17649.misc b/changelog.d/17649.misc deleted file mode 100644 index ed1bf6bd55..0000000000 --- a/changelog.d/17649.misc +++ /dev/null @@ -1 +0,0 @@ -Use new database tables for sliding sync. diff --git a/changelog.d/17650.removal b/changelog.d/17650.removal deleted file mode 100644 index 1238815c08..0000000000 --- a/changelog.d/17650.removal +++ /dev/null @@ -1 +0,0 @@ -Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. 
\ No newline at end of file diff --git a/changelog.d/17654.misc b/changelog.d/17654.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17654.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17655.misc b/changelog.d/17655.misc deleted file mode 100644 index ce997d3b41..0000000000 --- a/changelog.d/17655.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent duplicate tags being added to Sliding Sync traces. diff --git a/changelog.d/17658.misc b/changelog.d/17658.misc deleted file mode 100644 index 0bdbc1140d..0000000000 --- a/changelog.d/17658.misc +++ /dev/null @@ -1 +0,0 @@ -Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. diff --git a/changelog.d/17665.misc b/changelog.d/17665.misc deleted file mode 100644 index 28921087a6..0000000000 --- a/changelog.d/17665.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up incremental Sliding Sync requests by avoiding extra work. diff --git a/changelog.d/17666.misc b/changelog.d/17666.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17666.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17670.misc b/changelog.d/17670.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17670.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17672.misc b/changelog.d/17672.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17672.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17673.misc b/changelog.d/17673.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17673.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17674.bugfix b/changelog.d/17674.bugfix deleted file mode 100644 index bbef5005a1..0000000000 --- a/changelog.d/17674.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. diff --git a/changelog.d/17684.misc b/changelog.d/17684.misc deleted file mode 100644 index ecfb040a5f..0000000000 --- a/changelog.d/17684.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sliding sync by reducing number of database calls. diff --git a/changelog.d/17688.misc b/changelog.d/17688.misc deleted file mode 100644 index 7ba8d48fbe..0000000000 --- a/changelog.d/17688.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sync by pulling out fewer events from the database. diff --git a/debian/changelog b/debian/changelog index dfb48edc49..51b082205d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.115.0~rc1) stable; urgency=medium + + * New Synapse release 1.115.0rc1. + + -- Synapse Packaging team Tue, 10 Sep 2024 08:39:09 -0600 + matrix-synapse-py3 (1.114.0) stable; urgency=medium * New Synapse release 1.114.0. 
diff --git a/pyproject.toml b/pyproject.toml index e93179c9e0..bd139e2834 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.114.0" +version = "1.115.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 5562a891689a1f5cb28874fea50407a507fc883f Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 10 Sep 2024 08:48:41 -0600 Subject: [PATCH 227/332] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 227129d00a..b86a4c310d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,7 +21,7 @@ ### Internal Changes -- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Update [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) - Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673)) - Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631)) - Make the sliding sync `PerConnectionState` class immutable. 
([\#17600](https://github.com/element-hq/synapse/issues/17600)) From 62523571ae9cfb3ecc0c5f7d4dbdd877c158e929 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:30:37 +0100 Subject: [PATCH 228/332] Bump serde from 1.0.209 to 1.0.210 (#17686) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18936ab843..98a2a6c3cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", From be603de2cbbfdbd056ac137180c0f465180064c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:31:34 +0100 Subject: [PATCH 229/332] Bump serde_json from 1.0.127 to 1.0.128 (#17687) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98a2a6c3cd..586ac094a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", From 598a83d00500c2114d4e1db16ef0b413447f0f67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:32:17 +0100 Subject: [PATCH 230/332] Bump cryptography from 43.0.0 to 43.0.1 (#17689) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 58 ++++++++++++++++++++++++++--------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3f2f815d8b..4c1756c78a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -357,38 +357,38 @@ files = [ [[package]] name = "cryptography" -version = "43.0.0" +version = "43.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"}, - {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"}, - {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"}, - {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"}, - {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"}, - {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"}, - {file = "cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"}, + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, ] [package.dependencies] @@ -401,7 +401,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From 1b197752b63e6bb364d3a262abf8d4a71676217d Mon Sep 17 00:00:00 2001 From: Jeremy Wright Date: Tue, 10 Sep 2024 09:33:25 -0700 Subject: [PATCH 231/332] Fix minor misspelling in README.rst. (#17664) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index d5625afe8f..2fe4a7e43f 100644 --- a/README.rst +++ b/README.rst @@ -158,7 +158,7 @@ it: We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to the public internet. Without it, anyone can freely register accounts on your homeserver. -This can be exploited by attackers to create spambots targetting the rest of the Matrix +This can be exploited by attackers to create spambots targeting the rest of the Matrix federation. 
Your new user name will be formed partly from the ``server_name``, and partly From 60441059a3bf4225a3bd16152b774aef14a797c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 18:05:31 +0100 Subject: [PATCH 232/332] Bump anyhow from 1.0.86 to 1.0.87 (#17685) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 586ac094a3..f1edc21b5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "arc-swap" From e06e3c40047bb117c2f9f81b62be56a7ff5eb225 Mon Sep 17 00:00:00 2001 From: V02460 Date: Tue, 10 Sep 2024 19:27:46 +0200 Subject: [PATCH 233/332] Add config option turn_shared_secret_path (#17690) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17690.feature | 1 + docs/usage/configuration/config_documentation.md | 16 ++++++++++++++++ synapse/config/voip.py | 14 +++++++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17690.feature diff --git a/changelog.d/17690.feature b/changelog.d/17690.feature new file mode 100644 index 0000000000..36c72f89f8 --- /dev/null +++ b/changelog.d/17690.feature @@ -0,0 +1 @@ +Add config option `turn_shared_secret_path`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index c18f03d321..282b59dec9 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2315,6 +2315,22 @@ Example configuration: ```yaml turn_shared_secret: "YOUR_SHARED_SECRET" ``` +--- +### `turn_shared_secret_path` + +An alternative to [`turn_shared_secret`](#turn_shared_secret): +allows the shared secret to be specified in an external file. + +The file should be a plain text file, containing only the shared secret. +Synapse reads the shared secret from the given file once at startup. + +Example configuration: +```yaml +turn_shared_secret_path: /path/to/secrets/file +``` + +_Added in Synapse 1.116.0._ + --- ### `turn_username` and `turn_password` diff --git a/synapse/config/voip.py b/synapse/config/voip.py index 6fe43a9e32..8614a41dd4 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py @@ -23,7 +23,12 @@ from typing import Any from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError, read_file + +CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ +You have configured both `turn_shared_secret` and `turn_shared_secret_path`. +These are mutually incompatible. 
+""" class VoipConfig(Config): @@ -32,6 +37,13 @@ class VoipConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.turn_uris = config.get("turn_uris", []) self.turn_shared_secret = config.get("turn_shared_secret") + turn_shared_secret_path = config.get("turn_shared_secret_path") + if turn_shared_secret_path: + if self.turn_shared_secret: + raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR) + self.turn_shared_secret = read_file( + turn_shared_secret_path, ("turn_shared_secret_path",) + ).strip() self.turn_username = config.get("turn_username") self.turn_password = config.get("turn_password") self.turn_user_lifetime = self.parse_duration( From a7fcac564848911a32d31865f7f259aa943629a8 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 10 Sep 2024 11:29:24 -0600 Subject: [PATCH 234/332] Enable guest access on new media endpoints, per MSC4189 (#17675) --- changelog.d/17675.feature | 1 + synapse/rest/client/media.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17675.feature diff --git a/changelog.d/17675.feature b/changelog.d/17675.feature new file mode 100644 index 0000000000..20db149ca8 --- /dev/null +++ b/changelog.d/17675.feature @@ -0,0 +1 @@ +Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). \ No newline at end of file diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py index c30e3022de..25b302370f 100644 --- a/synapse/rest/client/media.py +++ b/synapse/rest/client/media.py @@ -138,7 +138,7 @@ class ThumbnailResource(RestServlet): ) -> None: # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request) @@ -229,7 +229,7 @@ class DownloadResource(RestServlet): # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request) From f6c2b0ec2ea6c63f569bade481fe2a4f3c1080ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Sep 2024 13:16:57 +0100 Subject: [PATCH 235/332] Sliding sync: don't fetch room summary for named rooms. (#17683) For rooms with a name we can skip fetching a full room summary, as we don't need to calculate heroes, and instead just fetch the room counts directly. This also changes things to not return counts and heroes for non-joined rooms. For left/banned rooms we were returning zero values anyway, and for invite/knock rooms we don't really want to leak such information (even if some of is included in the stripped state). 
--- changelog.d/17683.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 78 +++++++++---------- synapse/storage/_base.py | 2 + synapse/storage/databases/main/roommember.py | 37 ++++++--- synapse/storage/databases/main/state.py | 8 ++ .../main/delta/87/03_current_state_index.sql | 19 +++++ .../client/sliding_sync/test_rooms_meta.py | 52 ++++++------- 7 files changed, 121 insertions(+), 76 deletions(-) create mode 100644 changelog.d/17683.misc create mode 100644 synapse/storage/schema/main/delta/87/03_current_state_index.sql diff --git a/changelog.d/17683.misc b/changelog.d/17683.misc new file mode 100644 index 0000000000..11a10ff854 --- /dev/null +++ b/changelog.d/17683.misc @@ -0,0 +1 @@ +Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index b097ac57a2..04493494a6 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -784,32 +784,10 @@ class SlidingSyncHandler: ): avatar_changed = True + # We only need the room summary for calculating heroes, however if we do + # fetch it then we can use it to calculate `joined_count` and + # `invited_count`. room_membership_summary: Optional[Mapping[str, MemberSummary]] = None - empty_membership_summary = MemberSummary([], 0) - # We need the room summary for: - # - Always for initial syncs (or the first time we send down the room) - # - When the room has no name, we need `heroes` - # - When the membership has changed so we need to give updated `heroes` and - # `joined_count`/`invited_count`. - # - # Ideally, instead of just looking at `name_changed`, we'd check if the room - # name is not set but this is a good enough approximation that saves us from - # having to pull out the full event. This just means, we're generating the - # summary whenever the room name changes instead of only when it changes to - # `None`. - if initial or name_changed or membership_changed: - # We can't trace the function directly because it's cached and the `@cached` - # decorator doesn't mix with `@trace` yet. - with start_active_span("get_room_summary"): - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - # TODO: Figure out how to get the membership summary for left/banned rooms - room_membership_summary = {} - else: - room_membership_summary = await self.store.get_room_summary(room_id) - # TODO: Reverse/rewind back to the `to_token` # `heroes` are required if the room name is not set. # @@ -828,11 +806,45 @@ class SlidingSyncHandler: # get them on initial syncs (or the first time we send down the room) or if the # membership has changed which may change the heroes. if name_event_id is None and (initial or (not initial and membership_changed)): - assert room_membership_summary is not None + # We need the room summary to extract the heroes from + if room_membership_for_user_at_to_token.membership != Membership.JOIN: + # TODO: Figure out how to get the membership summary for left/banned rooms + # For invite/knock rooms we don't include the information. + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` + hero_user_ids = extract_heroes_from_room_summary( room_membership_summary, me=user.to_string() ) + # Fetch the membership counts for rooms we're joined to. 
+ # + # Similarly to other metadata, we only need to calculate the member + # counts if this is an initial sync or the memberships have changed. + joined_count: Optional[int] = None + invited_count: Optional[int] = None + if ( + initial or membership_changed + ) and room_membership_for_user_at_to_token.membership == Membership.JOIN: + # If we have the room summary (because we calculated heroes above) + # then we can simply pull the counts from there. + if room_membership_summary is not None: + empty_membership_summary = MemberSummary([], 0) + + joined_count = room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count + + invited_count = room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count + else: + member_counts = await self.store.get_member_counts(room_id) + joined_count = member_counts.get(Membership.JOIN, 0) + invited_count = member_counts.get(Membership.INVITE, 0) + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) @@ -1090,20 +1102,6 @@ class SlidingSyncHandler: set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) - joined_count: Optional[int] = None - if initial or membership_changed: - assert room_membership_summary is not None - joined_count = room_membership_summary.get( - Membership.JOIN, empty_membership_summary - ).count - - invited_count: Optional[int] = None - if initial or membership_changed: - assert room_membership_summary is not None - invited_count = room_membership_summary.get( - Membership.INVITE, empty_membership_summary - ).count - return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d22160b85c..d6deb077c8 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -112,6 +112,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache( "get_number_joined_users_in_room", (room_id,) ) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) # There's no easy way of invalidating this cache for just the users @@ -153,6 +154,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,)) self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 8df760e8a6..db03729cfe 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -312,18 +312,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats - # Note, rejected events will have a null membership field, so - # we we manually filter them out. - sql = """ - SELECT count(*), membership FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? 
-                AND membership IS NOT NULL
-            GROUP BY membership
-        """
+        counts = self._get_member_counts_txn(txn, room_id)

-        txn.execute(sql, (room_id,))
         res: Dict[str, MemberSummary] = {}
-        for count, membership in txn:
+        for membership, count in counts.items():
             res.setdefault(membership, MemberSummary([], count))

         # Order by membership (joins -> invites -> leave (former insiders) ->
@@ -369,6 +361,31 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             "get_room_summary", _get_room_summary_txn
         )

+    @cached()
+    async def get_member_counts(self, room_id: str) -> Mapping[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        return await self.db_pool.runInteraction(
+            "get_member_counts", self._get_member_counts_txn, room_id
+        )
+
+    def _get_member_counts_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> Dict[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        # Note, rejected events will have a null membership field, so
+        # we manually filter them out.
+        sql = """
+            SELECT count(*), membership FROM current_state_events
+            WHERE type = 'm.room.member' AND room_id = ?
+                AND membership IS NOT NULL
+            GROUP BY membership
+        """
+
+        txn.execute(sql, (room_id,))
+        return {membership: count for count, membership in txn}
+
     @cached()
     async def get_number_joined_users_in_room(self, room_id: str) -> int:
         return await self.db_pool.simple_select_one_onecol(
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index c5caaf56b0..ca31122ad3 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -736,6 +736,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
     DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
+    MEMBERS_CURRENT_STATE_UPDATE_NAME = "current_state_events_members_room_index"

     def __init__(
         self,
@@ -764,6 +765,13 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             self.DELETE_CURRENT_STATE_UPDATE_NAME,
             self._background_remove_left_rooms,
         )
+        self.db_pool.updates.register_background_index_update(
+            self.MEMBERS_CURRENT_STATE_UPDATE_NAME,
+            index_name="current_state_events_members_room_index",
+            table="current_state_events",
+            columns=["room_id", "membership"],
+            where_clause="type='m.room.member'",
+        )

     async def _background_remove_left_rooms(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/schema/main/delta/87/03_current_state_index.sql b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
new file mode 100644
index 0000000000..76b974271c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
@@ -0,0 +1,19 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+
+-- Add a background update to add a new index:
+-- `current_state_events(room_id, membership) WHERE type = 'm.room.member'`
+-- This makes counting membership in rooms (for syncs) much faster
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'current_state_events_members_room_index', '{}');
diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
index 6d2742e25f..6dbce7126f 100644
--- a/tests/rest/client/sliding_sync/test_rooms_meta.py
+++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -371,14 +371,17 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             "mxc://UPDATED_DUMMY_MEDIA_ID",
             response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            1,
+
+        # We don't give extra room information to invitees
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            1,
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
         )
+
         self.assertIsNone(
             response_body["rooms"][room_id1].get("is_dm"),
         )
@@ -450,15 +453,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             "mxc://DUMMY_MEDIA_ID",
             response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            # FIXME: The actual number should be "1" (user2) but we currently don't
-            # support this for rooms where the user has left/been banned.
-            0,
+
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            0,
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
         )
         self.assertIsNone(
             response_body["rooms"][room_id1].get("is_dm"),
         )
@@ -692,19 +696,15 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             [],
         )

-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            # FIXME: The actual number should be "1" (user2) but we currently don't
-            # support this for rooms where the user has left/been banned.
-            0,
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            # We shouldn't see user5 since they were invited after user1 was banned.
-            #
-            # FIXME: The actual number should be "1" (user3) but we currently don't
-            # support this for rooms where the user has left/been banned.
- 0, + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], ) def test_rooms_meta_heroes_incremental_sync_no_change(self) -> None: From 596b96411bfd852e757a8673ecac0dbc31f77735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Sep 2024 15:38:46 +0100 Subject: [PATCH 236/332] Sliding sync: various fixups to the background update (#17652) --- changelog.d/17652.misc | 1 + synapse/storage/databases/main/events.py | 45 ++++- .../databases/main/events_bg_updates.py | 190 +++++++++++++----- tests/storage/test_sliding_sync_tables.py | 130 ------------ 4 files changed, 186 insertions(+), 180 deletions(-) create mode 100644 changelog.d/17652.misc diff --git a/changelog.d/17652.misc b/changelog.d/17652.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17652.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index e5f63019fd..c0b7d8107d 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1980,7 +1980,12 @@ class PersistEventsStore: if state_key == (EventTypes.Create, ""): room_type = event.content.get(EventContentFields.ROOM_TYPE) # Scrutinize JSON values - if room_type is None or isinstance(room_type, str): + if room_type is None or ( + isinstance(room_type, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in room_type + ): sliding_sync_insert_map["room_type"] = room_type elif state_key == (EventTypes.RoomEncryption, ""): encryption_algorithm = event.content.get( @@ -1990,15 +1995,26 @@ class PersistEventsStore: sliding_sync_insert_map["is_encrypted"] = is_encrypted elif state_key == (EventTypes.Name, ""): room_name = event.content.get(EventContentFields.ROOM_NAME) - # Scrutinize JSON values - if room_name is None or isinstance(room_name, str): + # Scrutinize JSON values. We ignore values with nulls as + # postgres doesn't allow null bytes in text columns. + if room_name is None or ( + isinstance(room_name, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in room_name + ): sliding_sync_insert_map["room_name"] = room_name elif state_key == (EventTypes.Tombstone, ""): successor_room_id = event.content.get( EventContentFields.TOMBSTONE_SUCCESSOR_ROOM ) # Scrutinize JSON values - if successor_room_id is None or isinstance(successor_room_id, str): + if successor_room_id is None or ( + isinstance(successor_room_id, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in successor_room_id + ): sliding_sync_insert_map["tombstone_successor_room_id"] = ( successor_room_id ) @@ -2081,6 +2097,21 @@ class PersistEventsStore: else None ) + # Check for null bytes in the room name and type. We have to + # ignore values with null bytes as Postgres doesn't allow them + # in text columns. 
+ if ( + sliding_sync_insert_map["room_name"] is not None + and "\0" in sliding_sync_insert_map["room_name"] + ): + sliding_sync_insert_map.pop("room_name") + + if ( + sliding_sync_insert_map["room_type"] is not None + and "\0" in sliding_sync_insert_map["room_type"] + ): + sliding_sync_insert_map.pop("room_type") + # Find the tombstone_successor_room_id # Note: This isn't one of the stripped state events according to the spec # but seems like there is no reason not to support this kind of thing. @@ -2095,6 +2126,12 @@ class PersistEventsStore: else None ) + if ( + sliding_sync_insert_map["tombstone_successor_room_id"] is not None + and "\0" in sliding_sync_insert_map["tombstone_successor_room_id"] + ): + sliding_sync_insert_map.pop("tombstone_successor_room_id") + else: # No stripped state provided sliding_sync_insert_map["has_known_state"] = False diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b3244f7457..743200471b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -47,6 +47,7 @@ from synapse.storage.databases.main.events_worker import ( ) from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore +from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES @@ -1877,9 +1878,29 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_memberships_to_update_txn( txn: LoggingTransaction, ) -> List[ - Tuple[str, Optional[str], str, str, str, str, int, Optional[str], bool] + Tuple[ + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, + ] ]: # Fetch the set of event IDs that we want to update + # + # We skip over rows which we've already handled, i.e. have a + # matching row in `sliding_sync_membership_snapshots` with the same + # room, user and event ID. + # + # We also ignore rooms that the user has left themselves (i.e. not + # kicked). This is to avoid having to port lots of old rooms that we + # will never send down sliding sync (as we exclude such rooms from + # initial syncs). if initial_phase: # There are some old out-of-band memberships (before @@ -1892,6 +1913,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS SELECT c.room_id, r.room_id, + r.room_version, c.user_id, e.sender, c.event_id, @@ -1900,9 +1922,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS e.instance_name, e.outlier FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) INNER JOIN events AS e USING (event_id) LEFT JOIN rooms AS r ON (c.room_id = r.room_id) WHERE (c.room_id, c.user_id) > (?, ?) + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) ORDER BY c.room_id ASC, c.user_id ASC LIMIT ? 
""", @@ -1922,7 +1946,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS """ SELECT c.room_id, - c.room_id, + r.room_id, + r.room_version, c.user_id, e.sender, c.event_id, @@ -1931,9 +1956,12 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS e.instance_name, e.outlier FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) INNER JOIN events AS e USING (event_id) - WHERE event_stream_ordering > ? - ORDER BY event_stream_ordering ASC + LEFT JOIN rooms AS r ON (c.room_id = r.room_id) + WHERE c.event_stream_ordering > ? + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) + ORDER BY c.event_stream_ordering ASC LIMIT ? """, (last_event_stream_ordering, batch_size), @@ -1944,7 +1972,16 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS memberships_to_update_rows = cast( List[ Tuple[ - str, Optional[str], str, str, str, str, int, Optional[str], bool + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, ] ], txn.fetchall(), @@ -1977,7 +2014,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_previous_invite_or_knock_membership_txn( txn: LoggingTransaction, room_id: str, user_id: str, event_id: str - ) -> Tuple[str, str]: + ) -> Optional[Tuple[str, str]]: # Find the previous invite/knock event before the leave event # # Here are some notes on how we landed on this query: @@ -2027,8 +2064,13 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) row = txn.fetchone() - # We should see a corresponding previous invite/knock event - assert row is not None + if row is None: + # Generally we should have an invite or knock event for leaves + # that are outliers, however this may not always be the case + # (e.g. a local user got kicked but the kick event got pulled in + # as an outlier). + return None + event_id, membership = row return event_id, membership @@ -2043,6 +2085,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS for ( room_id, room_id_from_rooms_table, + room_version_id, user_id, sender, membership_event_id, @@ -2061,6 +2104,14 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS Membership.BAN, ) + if ( + room_version_id is not None + and room_version_id not in KNOWN_ROOM_VERSIONS + ): + # Ignore rooms with unknown room versions (these were + # experimental rooms, that we no longer support). + continue + # There are some old out-of-band memberships (before # https://github.com/matrix-org/synapse/issues/6983) where we don't have the # corresponding room stored in the `rooms` table`. We have a `FOREIGN KEY` @@ -2148,14 +2199,17 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # in the events table though. We'll just say that we don't # know the state for these rooms and continue on with our # day. 
- sliding_sync_membership_snapshots_insert_map["has_known_state"] = ( - False - ) + sliding_sync_membership_snapshots_insert_map = { + "has_known_state": False, + "room_type": None, + "room_name": None, + "is_encrypted": False, + } elif membership in (Membership.INVITE, Membership.KNOCK) or ( membership in (Membership.LEAVE, Membership.BAN) and is_outlier ): - invite_or_knock_event_id = membership_event_id - invite_or_knock_membership = membership + invite_or_knock_event_id = None + invite_or_knock_membership = None # If the event is an `out_of_band_membership` (special case of # `outlier`), we never had historical state so we have to pull from @@ -2164,35 +2218,55 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # membership (i.e. the room shouldn't disappear if your using the # `is_encrypted` filter and you leave). if membership in (Membership.LEAVE, Membership.BAN) and is_outlier: - ( - invite_or_knock_event_id, - invite_or_knock_membership, - ) = await self.db_pool.runInteraction( + previous_membership = await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn", _find_previous_invite_or_knock_membership_txn, room_id, user_id, membership_event_id, ) + if previous_membership is not None: + ( + invite_or_knock_event_id, + invite_or_knock_membership, + ) = previous_membership + else: + invite_or_knock_event_id = membership_event_id + invite_or_knock_membership = membership - # Pull from the stripped state on the invite/knock event - invite_or_knock_event = await self.get_event(invite_or_knock_event_id) - - raw_stripped_state_events = None - if invite_or_knock_membership == Membership.INVITE: - invite_room_state = invite_or_knock_event.unsigned.get( - "invite_room_state" + if ( + invite_or_knock_event_id is not None + and invite_or_knock_membership is not None + ): + # Pull from the stripped state on the invite/knock event + invite_or_knock_event = await self.get_event( + invite_or_knock_event_id ) - raw_stripped_state_events = invite_room_state - elif invite_or_knock_membership == Membership.KNOCK: - knock_room_state = invite_or_knock_event.unsigned.get( - "knock_room_state" - ) - raw_stripped_state_events = knock_room_state - sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state( - raw_stripped_state_events - ) + raw_stripped_state_events = None + if invite_or_knock_membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif invite_or_knock_membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + + sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state( + raw_stripped_state_events + ) + else: + # We couldn't find any state for the membership, so we just have to + # leave it as empty. 
+ sliding_sync_membership_snapshots_insert_map = { + "has_known_state": False, + "room_type": None, + "room_name": None, + "is_encrypted": False, + } # We should have some insert values for each room, even if no # stripped state is on the event because we still want to record @@ -2311,19 +2385,42 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) # We need to find the `forgotten` value during the transaction because # we can't risk inserting stale data. - txn.execute( - """ - UPDATE sliding_sync_membership_snapshots - SET - forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?) - WHERE room_id = ? and user_id = ? - """, - ( - membership_event_id, - room_id, - user_id, - ), - ) + if isinstance(txn.database_engine, PostgresEngine): + txn.execute( + """ + UPDATE sliding_sync_membership_snapshots + SET + forgotten = m.forgotten + FROM room_memberships AS m + WHERE sliding_sync_membership_snapshots.room_id = ? + AND sliding_sync_membership_snapshots.user_id = ? + AND membership_event_id = ? + AND membership_event_id = m.event_id + AND m.event_id IS NOT NULL + """, + ( + room_id, + user_id, + membership_event_id, + ), + ) + else: + # SQLite doesn't support UPDATE FROM before 3.33.0, so we do + # this via sub-selects. + txn.execute( + """ + UPDATE sliding_sync_membership_snapshots + SET + forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?) + WHERE room_id = ? and user_id = ? AND membership_event_id = ? + """, + ( + membership_event_id, + room_id, + user_id, + membership_event_id, + ), + ) await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update", _fill_table_txn @@ -2333,6 +2430,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ( room_id, _room_id_from_rooms_table, + _room_version_id, user_id, _sender, _membership_event_id, diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index de80ad53cd..61dccc8077 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -4416,136 +4416,6 @@ class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase): ), ) - def test_membership_snapshots_background_update_forgotten_partial(self) -> None: - """ - Test an existing `sliding_sync_membership_snapshots` row is updated with the - latest `forgotten` status after the background update passes over it. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - - # User1 joins the room - self.helper.join(room_id, user1_id, tok=user1_tok) - # User1 leaves the room (we have to leave in order to forget the room) - self.helper.leave(room_id, user1_id, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id) - ) - - # Forget the room - channel = self.make_request( - "POST", - f"/_matrix/client/r0/rooms/{room_id}/forget", - content={}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.result) - - # Clean-up the `sliding_sync_joined_rooms` table as if the forgotten status - # never made it into the table. 
- self.get_success( - self.store.db_pool.simple_update( - table="sliding_sync_membership_snapshots", - keyvalues={"room_id": room_id}, - updatevalues={"forgotten": 0}, - desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_partial", - ) - ) - - # We should see the partial row that we made in preparation for the test. - sliding_sync_membership_snapshots_results = ( - self._get_sliding_sync_membership_snapshots() - ) - self.assertIncludes( - set(sliding_sync_membership_snapshots_results.keys()), - { - (room_id, user1_id), - (room_id, user2_id), - }, - exact=True, - ) - user1_snapshot = _SlidingSyncMembershipSnapshotResult( - room_id=room_id, - user_id=user1_id, - sender=user1_id, - membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, - membership=Membership.LEAVE, - event_stream_ordering=state_map[ - (EventTypes.Member, user1_id) - ].internal_metadata.stream_ordering, - has_known_state=True, - room_type=None, - room_name=None, - is_encrypted=False, - tombstone_successor_room_id=None, - # Room is *not* forgotten because of our test preparation - forgotten=False, - ) - self.assertEqual( - sliding_sync_membership_snapshots_results.get((room_id, user1_id)), - user1_snapshot, - ) - user2_snapshot = _SlidingSyncMembershipSnapshotResult( - room_id=room_id, - user_id=user2_id, - sender=user2_id, - membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, - membership=Membership.JOIN, - event_stream_ordering=state_map[ - (EventTypes.Member, user2_id) - ].internal_metadata.stream_ordering, - has_known_state=True, - room_type=None, - room_name=None, - is_encrypted=False, - tombstone_successor_room_id=None, - ) - self.assertEqual( - sliding_sync_membership_snapshots_results.get((room_id, user2_id)), - user2_snapshot, - ) - - # Insert and run the background update. 
-        self.get_success(
-            self.store.db_pool.simple_insert(
-                "background_updates",
-                {
-                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
-                    "progress_json": "{}",
-                },
-            )
-        )
-        self.store.db_pool.updates._all_done = False
-        self.wait_for_background_updates()
-
-        # Make sure the table is populated
-        sliding_sync_membership_snapshots_results = (
-            self._get_sliding_sync_membership_snapshots()
-        )
-        self.assertIncludes(
-            set(sliding_sync_membership_snapshots_results.keys()),
-            {
-                (room_id, user1_id),
-                (room_id, user2_id),
-            },
-            exact=True,
-        )
-        # Forgotten status is now updated
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
-            attr.evolve(user1_snapshot, forgotten=True),
-        )
-        # Holds the info according to the current state when the user joined
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
-            user2_snapshot,
-        )
-

 class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
     """
+ and "\0" not in room_name + ): sliding_sync_insert_map["room_name"] = room_name elif state_key == (EventTypes.Tombstone, ""): successor_room_id = event.content.get( EventContentFields.TOMBSTONE_SUCCESSOR_ROOM ) # Scrutinize JSON values - if successor_room_id is None or isinstance(successor_room_id, str): + if successor_room_id is None or ( + isinstance(successor_room_id, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in successor_room_id + ): sliding_sync_insert_map["tombstone_successor_room_id"] = ( successor_room_id ) @@ -2081,6 +2097,21 @@ class PersistEventsStore: else None ) + # Check for null bytes in the room name and type. We have to + # ignore values with null bytes as Postgres doesn't allow them + # in text columns. + if ( + sliding_sync_insert_map["room_name"] is not None + and "\0" in sliding_sync_insert_map["room_name"] + ): + sliding_sync_insert_map.pop("room_name") + + if ( + sliding_sync_insert_map["room_type"] is not None + and "\0" in sliding_sync_insert_map["room_type"] + ): + sliding_sync_insert_map.pop("room_type") + # Find the tombstone_successor_room_id # Note: This isn't one of the stripped state events according to the spec # but seems like there is no reason not to support this kind of thing. @@ -2095,6 +2126,12 @@ class PersistEventsStore: else None ) + if ( + sliding_sync_insert_map["tombstone_successor_room_id"] is not None + and "\0" in sliding_sync_insert_map["tombstone_successor_room_id"] + ): + sliding_sync_insert_map.pop("tombstone_successor_room_id") + else: # No stripped state provided sliding_sync_insert_map["has_known_state"] = False diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b3244f7457..743200471b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -47,6 +47,7 @@ from synapse.storage.databases.main.events_worker import ( ) from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.databases.main.stream import StreamWorkerStore +from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES @@ -1877,9 +1878,29 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_memberships_to_update_txn( txn: LoggingTransaction, ) -> List[ - Tuple[str, Optional[str], str, str, str, str, int, Optional[str], bool] + Tuple[ + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, + ] ]: # Fetch the set of event IDs that we want to update + # + # We skip over rows which we've already handled, i.e. have a + # matching row in `sliding_sync_membership_snapshots` with the same + # room, user and event ID. + # + # We also ignore rooms that the user has left themselves (i.e. not + # kicked). This is to avoid having to port lots of old rooms that we + # will never send down sliding sync (as we exclude such rooms from + # initial syncs). 
if initial_phase: # There are some old out-of-band memberships (before @@ -1892,6 +1913,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS SELECT c.room_id, r.room_id, + r.room_version, c.user_id, e.sender, c.event_id, @@ -1900,9 +1922,11 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS e.instance_name, e.outlier FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) INNER JOIN events AS e USING (event_id) LEFT JOIN rooms AS r ON (c.room_id = r.room_id) WHERE (c.room_id, c.user_id) > (?, ?) + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) ORDER BY c.room_id ASC, c.user_id ASC LIMIT ? """, @@ -1922,7 +1946,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS """ SELECT c.room_id, - c.room_id, + r.room_id, + r.room_version, c.user_id, e.sender, c.event_id, @@ -1931,9 +1956,12 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS e.instance_name, e.outlier FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) INNER JOIN events AS e USING (event_id) - WHERE event_stream_ordering > ? - ORDER BY event_stream_ordering ASC + LEFT JOIN rooms AS r ON (c.room_id = r.room_id) + WHERE c.event_stream_ordering > ? + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) + ORDER BY c.event_stream_ordering ASC LIMIT ? """, (last_event_stream_ordering, batch_size), @@ -1944,7 +1972,16 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS memberships_to_update_rows = cast( List[ Tuple[ - str, Optional[str], str, str, str, str, int, Optional[str], bool + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, ] ], txn.fetchall(), @@ -1977,7 +2014,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS def _find_previous_invite_or_knock_membership_txn( txn: LoggingTransaction, room_id: str, user_id: str, event_id: str - ) -> Tuple[str, str]: + ) -> Optional[Tuple[str, str]]: # Find the previous invite/knock event before the leave event # # Here are some notes on how we landed on this query: @@ -2027,8 +2064,13 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) row = txn.fetchone() - # We should see a corresponding previous invite/knock event - assert row is not None + if row is None: + # Generally we should have an invite or knock event for leaves + # that are outliers, however this may not always be the case + # (e.g. a local user got kicked but the kick event got pulled in + # as an outlier). + return None + event_id, membership = row return event_id, membership @@ -2043,6 +2085,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS for ( room_id, room_id_from_rooms_table, + room_version_id, user_id, sender, membership_event_id, @@ -2061,6 +2104,14 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS Membership.BAN, ) + if ( + room_version_id is not None + and room_version_id not in KNOWN_ROOM_VERSIONS + ): + # Ignore rooms with unknown room versions (these were + # experimental rooms, that we no longer support). + continue + # There are some old out-of-band memberships (before # https://github.com/matrix-org/synapse/issues/6983) where we don't have the # corresponding room stored in the `rooms` table`. 
We have a `FOREIGN KEY` @@ -2148,14 +2199,17 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # in the events table though. We'll just say that we don't # know the state for these rooms and continue on with our # day. - sliding_sync_membership_snapshots_insert_map["has_known_state"] = ( - False - ) + sliding_sync_membership_snapshots_insert_map = { + "has_known_state": False, + "room_type": None, + "room_name": None, + "is_encrypted": False, + } elif membership in (Membership.INVITE, Membership.KNOCK) or ( membership in (Membership.LEAVE, Membership.BAN) and is_outlier ): - invite_or_knock_event_id = membership_event_id - invite_or_knock_membership = membership + invite_or_knock_event_id = None + invite_or_knock_membership = None # If the event is an `out_of_band_membership` (special case of # `outlier`), we never had historical state so we have to pull from @@ -2164,35 +2218,55 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # membership (i.e. the room shouldn't disappear if your using the # `is_encrypted` filter and you leave). if membership in (Membership.LEAVE, Membership.BAN) and is_outlier: - ( - invite_or_knock_event_id, - invite_or_knock_membership, - ) = await self.db_pool.runInteraction( + previous_membership = await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn", _find_previous_invite_or_knock_membership_txn, room_id, user_id, membership_event_id, ) + if previous_membership is not None: + ( + invite_or_knock_event_id, + invite_or_knock_membership, + ) = previous_membership + else: + invite_or_knock_event_id = membership_event_id + invite_or_knock_membership = membership - # Pull from the stripped state on the invite/knock event - invite_or_knock_event = await self.get_event(invite_or_knock_event_id) - - raw_stripped_state_events = None - if invite_or_knock_membership == Membership.INVITE: - invite_room_state = invite_or_knock_event.unsigned.get( - "invite_room_state" + if ( + invite_or_knock_event_id is not None + and invite_or_knock_membership is not None + ): + # Pull from the stripped state on the invite/knock event + invite_or_knock_event = await self.get_event( + invite_or_knock_event_id ) - raw_stripped_state_events = invite_room_state - elif invite_or_knock_membership == Membership.KNOCK: - knock_room_state = invite_or_knock_event.unsigned.get( - "knock_room_state" - ) - raw_stripped_state_events = knock_room_state - sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state( - raw_stripped_state_events - ) + raw_stripped_state_events = None + if invite_or_knock_membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif invite_or_knock_membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + + sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state( + raw_stripped_state_events + ) + else: + # We couldn't find any state for the membership, so we just have to + # leave it as empty. 
+ sliding_sync_membership_snapshots_insert_map = { + "has_known_state": False, + "room_type": None, + "room_name": None, + "is_encrypted": False, + } # We should have some insert values for each room, even if no # stripped state is on the event because we still want to record @@ -2311,19 +2385,42 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) # We need to find the `forgotten` value during the transaction because # we can't risk inserting stale data. - txn.execute( - """ - UPDATE sliding_sync_membership_snapshots - SET - forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?) - WHERE room_id = ? and user_id = ? - """, - ( - membership_event_id, - room_id, - user_id, - ), - ) + if isinstance(txn.database_engine, PostgresEngine): + txn.execute( + """ + UPDATE sliding_sync_membership_snapshots + SET + forgotten = m.forgotten + FROM room_memberships AS m + WHERE sliding_sync_membership_snapshots.room_id = ? + AND sliding_sync_membership_snapshots.user_id = ? + AND membership_event_id = ? + AND membership_event_id = m.event_id + AND m.event_id IS NOT NULL + """, + ( + room_id, + user_id, + membership_event_id, + ), + ) + else: + # SQLite doesn't support UPDATE FROM before 3.33.0, so we do + # this via sub-selects. + txn.execute( + """ + UPDATE sliding_sync_membership_snapshots + SET + forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?) + WHERE room_id = ? and user_id = ? AND membership_event_id = ? + """, + ( + membership_event_id, + room_id, + user_id, + membership_event_id, + ), + ) await self.db_pool.runInteraction( "sliding_sync_membership_snapshots_bg_update", _fill_table_txn @@ -2333,6 +2430,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ( room_id, _room_id_from_rooms_table, + _room_version_id, user_id, _sender, _membership_event_id, diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index de80ad53cd..61dccc8077 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -4416,136 +4416,6 @@ class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase): ), ) - def test_membership_snapshots_background_update_forgotten_partial(self) -> None: - """ - Test an existing `sliding_sync_membership_snapshots` row is updated with the - latest `forgotten` status after the background update passes over it. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - - # User1 joins the room - self.helper.join(room_id, user1_id, tok=user1_tok) - # User1 leaves the room (we have to leave in order to forget the room) - self.helper.leave(room_id, user1_id, tok=user1_tok) - - state_map = self.get_success( - self.storage_controllers.state.get_current_state(room_id) - ) - - # Forget the room - channel = self.make_request( - "POST", - f"/_matrix/client/r0/rooms/{room_id}/forget", - content={}, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.result) - - # Clean-up the `sliding_sync_joined_rooms` table as if the forgotten status - # never made it into the table. 
-        self.get_success(
-            self.store.db_pool.simple_update(
-                table="sliding_sync_membership_snapshots",
-                keyvalues={"room_id": room_id},
-                updatevalues={"forgotten": 0},
-                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_partial",
-            )
-        )
-
-        # We should see the partial row that we made in preparation for the test.
-        sliding_sync_membership_snapshots_results = (
-            self._get_sliding_sync_membership_snapshots()
-        )
-        self.assertIncludes(
-            set(sliding_sync_membership_snapshots_results.keys()),
-            {
-                (room_id, user1_id),
-                (room_id, user2_id),
-            },
-            exact=True,
-        )
-        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
-            room_id=room_id,
-            user_id=user1_id,
-            sender=user1_id,
-            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
-            membership=Membership.LEAVE,
-            event_stream_ordering=state_map[
-                (EventTypes.Member, user1_id)
-            ].internal_metadata.stream_ordering,
-            has_known_state=True,
-            room_type=None,
-            room_name=None,
-            is_encrypted=False,
-            tombstone_successor_room_id=None,
-            # Room is *not* forgotten because of our test preparation
-            forgotten=False,
-        )
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
-            user1_snapshot,
-        )
-        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
-            room_id=room_id,
-            user_id=user2_id,
-            sender=user2_id,
-            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
-            membership=Membership.JOIN,
-            event_stream_ordering=state_map[
-                (EventTypes.Member, user2_id)
-            ].internal_metadata.stream_ordering,
-            has_known_state=True,
-            room_type=None,
-            room_name=None,
-            is_encrypted=False,
-            tombstone_successor_room_id=None,
-        )
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
-            user2_snapshot,
-        )
-
-        # Insert and run the background update.
-        self.get_success(
-            self.store.db_pool.simple_insert(
-                "background_updates",
-                {
-                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
-                    "progress_json": "{}",
-                },
-            )
-        )
-        self.store.db_pool.updates._all_done = False
-        self.wait_for_background_updates()
-
-        # Make sure the table is populated
-        sliding_sync_membership_snapshots_results = (
-            self._get_sliding_sync_membership_snapshots()
-        )
-        self.assertIncludes(
-            set(sliding_sync_membership_snapshots_results.keys()),
-            {
-                (room_id, user1_id),
-                (room_id, user2_id),
-            },
-            exact=True,
-        )
-        # Forgotten status is now updated
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
-            attr.evolve(user1_snapshot, forgotten=True),
-        )
-        # Holds the info according to the current state when the user joined
-        self.assertEqual(
-            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
-            user2_snapshot,
-        )
-

 class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
     """

From 76f7c91e441cd21f518b2fa79903cb4d03caaae3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 11 Sep 2024 13:16:57 +0100
Subject: [PATCH 238/332] Sliding sync: don't fetch room summary for named
 rooms. (#17683)

For rooms with a name we can skip fetching a full room summary, as we
don't need to calculate heroes, and instead just fetch the room counts
directly.

This also changes things to not return counts and heroes for non-joined
rooms. For left/banned rooms we were returning zero values anyway, and
for invite/knock rooms we don't really want to leak such information
(even if some of it is included in the stripped state).
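To make the shape of that optimisation concrete, here is a minimal,
self-contained sketch of the idea. It is illustrative only: the two
callables, the "join"/"invite" keys, and the hero cap of five are
assumptions standing in for Synapse's real storage methods, not its
actual API.

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Tuple


@dataclass
class MemberSummary:
    # Mirrors the shape used above: a sample of member IDs plus a total count.
    members: List[str] = field(default_factory=list)
    count: int = 0


def counts_and_heroes(
    room_name: Optional[str],
    get_room_summary: Callable[[], Dict[str, MemberSummary]],  # expensive query
    get_member_counts: Callable[[], Dict[str, int]],  # cheap counts-only query
) -> Tuple[Optional[List[str]], int, int]:
    heroes: Optional[List[str]] = None
    if room_name is None:
        # Unnamed room: we must compute heroes anyway, so reuse the summary's
        # counts instead of issuing a second query.
        summary = get_room_summary()
        heroes = summary.get("join", MemberSummary()).members[:5]
        joined = summary.get("join", MemberSummary()).count
        invited = summary.get("invite", MemberSummary()).count
    else:
        # Named room: skip the heavy summary and fetch only the counts.
        counts = get_member_counts()
        joined = counts.get("join", 0)
        invited = counts.get("invite", 0)
    return heroes, joined, invited


# For a named room, the expensive summary is never touched:
heroes, joined, invited = counts_and_heroes(
    "My Room",
    get_room_summary=lambda: {},  # never called for a named room
    get_member_counts=lambda: {"join": 2, "invite": 1},
)
assert (heroes, joined, invited) == (None, 2, 1)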
--- changelog.d/17683.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 78 +++++++++---------- synapse/storage/_base.py | 2 + synapse/storage/databases/main/roommember.py | 37 ++++++--- synapse/storage/databases/main/state.py | 8 ++ .../main/delta/87/03_current_state_index.sql | 19 +++++ .../client/sliding_sync/test_rooms_meta.py | 52 ++++++------- 7 files changed, 121 insertions(+), 76 deletions(-) create mode 100644 changelog.d/17683.misc create mode 100644 synapse/storage/schema/main/delta/87/03_current_state_index.sql diff --git a/changelog.d/17683.misc b/changelog.d/17683.misc new file mode 100644 index 0000000000..11a10ff854 --- /dev/null +++ b/changelog.d/17683.misc @@ -0,0 +1 @@ +Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index b097ac57a2..04493494a6 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -784,32 +784,10 @@ class SlidingSyncHandler: ): avatar_changed = True + # We only need the room summary for calculating heroes, however if we do + # fetch it then we can use it to calculate `joined_count` and + # `invited_count`. room_membership_summary: Optional[Mapping[str, MemberSummary]] = None - empty_membership_summary = MemberSummary([], 0) - # We need the room summary for: - # - Always for initial syncs (or the first time we send down the room) - # - When the room has no name, we need `heroes` - # - When the membership has changed so we need to give updated `heroes` and - # `joined_count`/`invited_count`. - # - # Ideally, instead of just looking at `name_changed`, we'd check if the room - # name is not set but this is a good enough approximation that saves us from - # having to pull out the full event. This just means, we're generating the - # summary whenever the room name changes instead of only when it changes to - # `None`. - if initial or name_changed or membership_changed: - # We can't trace the function directly because it's cached and the `@cached` - # decorator doesn't mix with `@trace` yet. - with start_active_span("get_room_summary"): - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - # TODO: Figure out how to get the membership summary for left/banned rooms - room_membership_summary = {} - else: - room_membership_summary = await self.store.get_room_summary(room_id) - # TODO: Reverse/rewind back to the `to_token` # `heroes` are required if the room name is not set. # @@ -828,11 +806,45 @@ class SlidingSyncHandler: # get them on initial syncs (or the first time we send down the room) or if the # membership has changed which may change the heroes. if name_event_id is None and (initial or (not initial and membership_changed)): - assert room_membership_summary is not None + # We need the room summary to extract the heroes from + if room_membership_for_user_at_to_token.membership != Membership.JOIN: + # TODO: Figure out how to get the membership summary for left/banned rooms + # For invite/knock rooms we don't include the information. + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` + hero_user_ids = extract_heroes_from_room_summary( room_membership_summary, me=user.to_string() ) + # Fetch the membership counts for rooms we're joined to. 
+ # + # Similarly to other metadata, we only need to calculate the member + # counts if this is an initial sync or the memberships have changed. + joined_count: Optional[int] = None + invited_count: Optional[int] = None + if ( + initial or membership_changed + ) and room_membership_for_user_at_to_token.membership == Membership.JOIN: + # If we have the room summary (because we calculated heroes above) + # then we can simply pull the counts from there. + if room_membership_summary is not None: + empty_membership_summary = MemberSummary([], 0) + + joined_count = room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count + + invited_count = room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count + else: + member_counts = await self.store.get_member_counts(room_id) + joined_count = member_counts.get(Membership.JOIN, 0) + invited_count = member_counts.get(Membership.INVITE, 0) + # Fetch the `required_state` for the room # # No `required_state` for invite/knock rooms (just `stripped_state`) @@ -1090,20 +1102,6 @@ class SlidingSyncHandler: set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) - joined_count: Optional[int] = None - if initial or membership_changed: - assert room_membership_summary is not None - joined_count = room_membership_summary.get( - Membership.JOIN, empty_membership_summary - ).count - - invited_count: Optional[int] = None - if initial or membership_changed: - assert room_membership_summary is not None - invited_count = room_membership_summary.get( - Membership.INVITE, empty_membership_summary - ).count - return SlidingSyncResult.RoomResult( name=room_name, avatar=room_avatar, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d22160b85c..d6deb077c8 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -112,6 +112,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache( "get_number_joined_users_in_room", (room_id,) ) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) # There's no easy way of invalidating this cache for just the users @@ -153,6 +154,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,)) self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 8df760e8a6..db03729cfe 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -312,18 +312,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats - # Note, rejected events will have a null membership field, so - # we we manually filter them out. - sql = """ - SELECT count(*), membership FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? 
-            AND membership IS NOT NULL
-            GROUP BY membership
-        """
+        counts = self._get_member_counts_txn(txn, room_id)

-        txn.execute(sql, (room_id,))
         res: Dict[str, MemberSummary] = {}
-        for count, membership in txn:
+        for membership, count in counts.items():
             res.setdefault(membership, MemberSummary([], count))

         # Order by membership (joins -> invites -> leave (former insiders) ->
@@ -369,6 +361,31 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             "get_room_summary", _get_room_summary_txn
         )

+    @cached()
+    async def get_member_counts(self, room_id: str) -> Mapping[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        return await self.db_pool.runInteraction(
+            "get_member_counts", self._get_member_counts_txn, room_id
+        )
+
+    def _get_member_counts_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> Dict[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        # Note, rejected events will have a null membership field, so
+        # we manually filter them out.
+        sql = """
+            SELECT count(*), membership FROM current_state_events
+            WHERE type = 'm.room.member' AND room_id = ?
+                AND membership IS NOT NULL
+            GROUP BY membership
+        """
+
+        txn.execute(sql, (room_id,))
+        return {membership: count for count, membership in txn}
+
     @cached()
     async def get_number_joined_users_in_room(self, room_id: str) -> int:
         return await self.db_pool.simple_select_one_onecol(
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index c5caaf56b0..ca31122ad3 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -736,6 +736,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
     DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
+    MEMBERS_CURRENT_STATE_UPDATE_NAME = "current_state_events_members_room_index"

     def __init__(
         self,
@@ -764,6 +765,13 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             self.DELETE_CURRENT_STATE_UPDATE_NAME,
             self._background_remove_left_rooms,
         )
+        self.db_pool.updates.register_background_index_update(
+            self.MEMBERS_CURRENT_STATE_UPDATE_NAME,
+            index_name="current_state_events_members_room_index",
+            table="current_state_events",
+            columns=["room_id", "membership"],
+            where_clause="type='m.room.member'",
+        )

     async def _background_remove_left_rooms(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/schema/main/delta/87/03_current_state_index.sql b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
new file mode 100644
index 0000000000..76b974271c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
@@ -0,0 +1,19 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- .
+
+
+-- Add a background update to add a new index:
+-- `current_state_events(room_id, membership) WHERE type = 'm.room.member'
+-- This makes counting membership in rooms (for syncs) much faster
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'current_state_events_members_room_index', '{}');
diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
index 6d2742e25f..6dbce7126f 100644
--- a/tests/rest/client/sliding_sync/test_rooms_meta.py
+++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -371,14 +371,17 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             "mxc://UPDATED_DUMMY_MEDIA_ID",
             response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            1,
+
+        # We don't give extra room information to invitees
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            1,
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
         )
+
         self.assertIsNone(
             response_body["rooms"][room_id1].get("is_dm"),
         )
@@ -450,15 +453,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             "mxc://DUMMY_MEDIA_ID",
             response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            # FIXME: The actual number should be "1" (user2) but we currently don't
-            # support this for rooms where the user has left/been banned.
-            0,
+
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            0,
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
         )
         self.assertIsNone(
             response_body["rooms"][room_id1].get("is_dm"),
         )
@@ -692,19 +696,15 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
             [],
         )

-        self.assertEqual(
-            response_body["rooms"][room_id1]["joined_count"],
-            # FIXME: The actual number should be "1" (user2) but we currently don't
-            # support this for rooms where the user has left/been banned.
-            0,
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
         )
-        self.assertEqual(
-            response_body["rooms"][room_id1]["invited_count"],
-            # We shouldn't see user5 since they were invited after user1 was banned.
-            #
-            # FIXME: The actual number should be "1" (user3) but we currently don't
-            # support this for rooms where the user has left/been banned.
-            0,
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
         )

     def test_rooms_meta_heroes_incremental_sync_no_change(self) -> None:

From e4a1f271b9365e2dddbb9205b10ebe36eed12e62 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 11 Sep 2024 12:13:54 -0500
Subject: [PATCH 239/332] Sliding Sync: Make sure we get up-to-date information
 from `get_sliding_sync_rooms_for_user(...)` (#17692)

We need to bust the `get_sliding_sync_rooms_for_user` cache when the
room encryption is updated, or when any other field used in the query
changes.
Follow-up to https://github.com/element-hq/synapse/pull/17630 - Bust cache for membership change (cross-reference `get_rooms_for_user`) - Bust cache for room `encryption` (cross-reference `get_room_encryption`) - Bust cache for `forgotten` (cross-reference `did_forget`/`get_forgotten_rooms_for_user`) --- changelog.d/17692.bugfix | 1 + synapse/storage/_base.py | 1 + synapse/storage/databases/main/cache.py | 14 ++ synapse/storage/databases/main/roommember.py | 9 +- .../client/sliding_sync/test_sliding_sync.py | 191 +++++++++++++----- 5 files changed, 160 insertions(+), 56 deletions(-) create mode 100644 changelog.d/17692.bugfix diff --git a/changelog.d/17692.bugfix b/changelog.d/17692.bugfix new file mode 100644 index 0000000000..84e0754a99 --- /dev/null +++ b/changelog.d/17692.bugfix @@ -0,0 +1 @@ +Make sure we get up-to-date state information when using the new Sliding Sync tables to derive room membership. diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d6deb077c8..e14d711c76 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -136,6 +136,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,)) self._attempt_to_invalidate_cache("get_room_type", (room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) def _invalidate_state_caches_all(self, room_id: str) -> None: """Invalidates caches that are based on the current state, but does diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index b0e30daee5..37c865a8e7 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -41,6 +41,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.events import SLIDING_SYNC_RELEVANT_STATE_SET from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import CachedFunction @@ -271,12 +272,20 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_rooms_for_user", (data.state_key,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user", None + ) elif data.type == EventTypes.RoomEncryption: self._attempt_to_invalidate_cache( "get_room_encryption", (data.room_id,) ) elif data.type == EventTypes.Create: self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) + + if (data.type, data.state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user", None + ) elif row.type == EventsStreamAllStateRow.TypeId: assert isinstance(data, EventsStreamAllStateRow) # Similar to the above, but the entire caches are invalidated. 
This is @@ -285,6 +294,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_rooms_for_user", None) self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,)) + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) else: raise Exception("Unknown events stream row type %s" % (row.type,)) @@ -365,6 +375,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): elif etype == EventTypes.RoomEncryption: self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + if (etype, state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) + if relates_to: self._attempt_to_invalidate_cache( "get_relations_for_event", @@ -477,6 +490,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_current_hosts_in_room_ordered", (room_id,) ) + self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index db03729cfe..1fc2d7ba1e 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1365,6 +1365,9 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): self._invalidate_cache_and_stream( txn, self.get_forgotten_rooms_for_user, (user_id,) ) + self._invalidate_cache_and_stream( + txn, self.get_sliding_sync_rooms_for_user, (user_id,) + ) await self.db_pool.runInteraction("forget_membership", f) @@ -1410,6 +1413,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def get_sliding_sync_rooms_for_user_txn( txn: LoggingTransaction, ) -> Dict[str, RoomsForUserSlidingSync]: + # XXX: If you use any new columns that can change (like from + # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the + # `get_sliding_sync_rooms_for_user` cache in the appropriate places (and add + # tests). 
sql = """ SELECT m.room_id, m.sender, m.membership, m.membership_event_id, r.room_version, @@ -1432,7 +1439,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): room_version_id=row[4], event_pos=PersistedEventPosition(row[5], row[6]), room_type=row[7], - is_encrypted=row[8], + is_encrypted=bool(row[8]), ) for row in txn } diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 930cb5ef45..9e23dbe522 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -722,43 +722,37 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.join(space_room_id, user1_id, tok=user1_tok) # Make an initial Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) # Make sure the response has the lists we requested self.assertListEqual( - list(channel.json_body["lists"].keys()), + list(response_body["lists"].keys()), ["all-list", "foo-list"], - channel.json_body["lists"].keys(), + response_body["lists"].keys(), ) # Make sure the lists have the correct rooms self.assertListEqual( - list(channel.json_body["lists"]["all-list"]["ops"]), + list(response_body["lists"]["all-list"]["ops"]), [ { "op": "SYNC", @@ -768,7 +762,7 @@ class SlidingSyncTestCase(SlidingSyncBase): ], ) self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -783,35 +777,30 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.leave(space_room_id, user2_id, tok=user2_tok) # Make an incremental Sliding Sync request - channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) + }, + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Make sure the lists have the correct rooms even though we `newly_left` self.assertListEqual( - list(channel.json_body["lists"]["all-list"]["ops"]), + list(response_body["lists"]["all-list"]["ops"]), [ { 
"op": "SYNC", @@ -821,7 +810,7 @@ class SlidingSyncTestCase(SlidingSyncBase): ], ) self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), + list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", @@ -831,6 +820,98 @@ class SlidingSyncTestCase(SlidingSyncBase): ], ) + def test_filter_is_encrypted_up_to_date(self) -> None: + """ + Make sure we get up-to-date `is_encrypted` status for a joined room + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Update the encryption status + self.helper.send_state( + room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # We should see the room now because it's encrypted + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_forgotten_up_to_date(self) -> None: + """ + Make sure we get up-to-date `forgotten` status for rooms + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is banned from the room (was never in the room) + self.helper.ban(room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # User1 forgets the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # We should no longer see the forgotten room + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + def test_sort_list(self) -> None: """ Test that the `lists` are sorted by `stream_ordering` From 16af80b8fbdeabcdea222b86c6f6b9da2f794565 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 11 Sep 2024 12:16:24 -0500 Subject: [PATCH 240/332] Sliding Sync: Use Sliding Sync tables for sorting (#17693) Use Sliding Sync tables for sorting (`bulk_get_last_event_pos_in_room_before_stream_ordering(...)` -> `_bulk_get_max_event_pos(...)`) --- changelog.d/17693.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 71 +++---------------- .../databases/main/events_bg_updates.py | 29 +------- .../storage/databases/main/events_worker.py | 12 ++++ synapse/storage/databases/main/roommember.py | 12 ---- synapse/storage/databases/main/stream.py | 34 +++++++-- synapse/types/storage/__init__.py | 47 ++++++++++++ tests/storage/test_sliding_sync_tables.py | 2 +- 
8 files changed, 103 insertions(+), 105 deletions(-) create mode 100644 changelog.d/17693.misc create mode 100644 synapse/types/storage/__init__.py diff --git a/changelog.d/17693.misc b/changelog.d/17693.misc new file mode 100644 index 0000000000..0d20c80916 --- /dev/null +++ b/changelog.d/17693.misc @@ -0,0 +1 @@ +Use Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 165b15c60f..652d05dbe9 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -27,6 +27,7 @@ from typing import ( Set, Tuple, Union, + cast, ) import attr @@ -355,11 +356,18 @@ class SlidingSyncRoomLists: if list_config.ranges: if list_config.ranges == [(0, len(filtered_sync_room_map) - 1)]: # If we are asking for the full range, we don't need to sort the list. - sorted_room_info = list(filtered_sync_room_map.values()) + sorted_room_info: List[RoomsForUserType] = list( + filtered_sync_room_map.values() + ) else: # Sort the list - sorted_room_info = await self.sort_rooms_using_tables( - filtered_sync_room_map, to_token + sorted_room_info = await self.sort_rooms( + # Cast is safe because RoomsForUserSlidingSync is part + # of the `RoomsForUserType` union. Why can't it detect this? + cast( + Dict[str, RoomsForUserType], filtered_sync_room_map + ), + to_token, ) for range in list_config.ranges: @@ -1762,63 +1770,6 @@ class SlidingSyncRoomLists: # Assemble a new sync room map but only with the `filtered_room_id_set` return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} - @trace - async def sort_rooms_using_tables( - self, - sync_room_map: Mapping[str, RoomsForUserSlidingSync], - to_token: StreamToken, - ) -> List[RoomsForUserSlidingSync]: - """ - Sort by `stream_ordering` of the last event that the user should see in the - room. `stream_ordering` is unique so we get a stable sort. - - Args: - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - to_token: We sort based on the events in the room at this token (<= `to_token`) - - Returns: - A sorted list of room IDs by `stream_ordering` along with membership information. - """ - - # Assemble a map of room ID to the `stream_ordering` of the last activity that the - # user should see in the room (<= `to_token`) - last_activity_in_room_map: Dict[str, int] = {} - - for room_id, room_for_user in sync_room_map.items(): - if room_for_user.membership != Membership.JOIN: - # If the user has left/been invited/knocked/been banned from a - # room, they shouldn't see anything past that point. - # - # FIXME: It's possible that people should see beyond this point - # in invited/knocked cases if for example the room has - # `invite`/`world_readable` history visibility, see - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - last_activity_in_room_map[room_id] = room_for_user.event_pos.stream - - # For fully-joined rooms, we find the latest activity at/before the - # `to_token`. 
- joined_room_positions = ( - await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( - [ - room_id - for room_id, room_for_user in sync_room_map.items() - if room_for_user.membership == Membership.JOIN - ], - to_token.room_key, - ) - ) - - last_activity_in_room_map.update(joined_room_positions) - - return sorted( - sync_room_map.values(), - # Sort by the last activity (stream_ordering) in the room - key=lambda room_info: last_activity_in_room_map[room_info.room_id], - # We want descending order - reverse=True, - ) - @trace async def sort_rooms( self, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 743200471b..a8723f94bc 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -52,6 +52,7 @@ from synapse.storage.types import Cursor from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES from synapse.types.state import StateFilter +from synapse.types.storage import _BackgroundUpdates from synapse.util import json_encoder from synapse.util.iterutils import batch_iter @@ -76,34 +77,6 @@ _REPLACE_STREAM_ORDERING_SQL_COMMANDS = ( ) -class _BackgroundUpdates: - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" - POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2" - INDEX_STREAM_ORDERING2 = "index_stream_ordering2" - INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url" - INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order" - INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream" - INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts" - REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" - - EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" - EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" - - EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" - - EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" - - SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE = ( - "sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update" - ) - SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE = "sliding_sync_joined_rooms_bg_update" - SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = ( - "sliding_sync_membership_snapshots_bg_update" - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: """Return value for _calculate_chain_cover_txn.""" diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b188f32927..029f4bd87d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -83,6 +83,7 @@ from synapse.storage.util.id_generators import ( from synapse.storage.util.sequence import build_sequence_generator from synapse.types import JsonDict, get_domain_from_id from synapse.types.state import StateFilter +from synapse.types.storage import _BackgroundUpdates from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.caches.descriptors import cached, cachedList @@ -2465,3 +2466,14 @@ class EventsWorkerStore(SQLBaseStore): ) self.invalidate_get_event_cache_after_txn(txn, 
event_id) + + async def have_finished_sliding_sync_background_jobs(self) -> bool: + """Return if it's safe to use the sliding sync membership tables.""" + + return await self.db_pool.updates.have_completed_background_updates( + ( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + ) + ) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 1fc2d7ba1e..8bfa6254f3 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -51,7 +51,6 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore -from synapse.storage.databases.main.events_bg_updates import _BackgroundUpdates from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine from synapse.storage.roommember import ( @@ -1449,17 +1448,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): get_sliding_sync_rooms_for_user_txn, ) - async def have_finished_sliding_sync_background_jobs(self) -> bool: - """Return if it's safe to use the sliding sync membership tables.""" - - return await self.db_pool.updates.have_completed_background_updates( - ( - _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, - _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, - _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, - ) - ) - class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 459436e304..94a7efee73 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1524,7 +1524,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # majority of rooms will have a latest token from before the min stream # pos. - def bulk_get_max_event_pos_txn( + def bulk_get_max_event_pos_fallback_txn( txn: LoggingTransaction, batched_room_ids: StrCollection ) -> Dict[str, int]: clause, args = make_in_list_sql_clause( @@ -1547,11 +1547,37 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): txn.execute(sql, [max_pos] + args) return {row[0]: row[1] for row in txn} + # It's easier to look at the `sliding_sync_joined_rooms` table and avoid all of + # the joins and sub-queries. 
+ def bulk_get_max_event_pos_from_sliding_sync_tables_txn( + txn: LoggingTransaction, batched_room_ids: StrCollection + ) -> Dict[str, int]: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batched_room_ids + ) + sql = f""" + SELECT room_id, event_stream_ordering + FROM sliding_sync_joined_rooms + WHERE {clause} + ORDER BY event_stream_ordering DESC + """ + txn.execute(sql, args) + return {row[0]: row[1] for row in txn} + recheck_rooms: Set[str] = set() for batched in batch_iter(room_ids, 1000): - batch_results = await self.db_pool.runInteraction( - "_bulk_get_max_event_pos", bulk_get_max_event_pos_txn, batched - ) + if await self.have_finished_sliding_sync_background_jobs(): + batch_results = await self.db_pool.runInteraction( + "bulk_get_max_event_pos_from_sliding_sync_tables_txn", + bulk_get_max_event_pos_from_sliding_sync_tables_txn, + batched, + ) + else: + batch_results = await self.db_pool.runInteraction( + "bulk_get_max_event_pos_fallback_txn", + bulk_get_max_event_pos_fallback_txn, + batched, + ) for room_id, stream_ordering in batch_results.items(): if stream_ordering <= now_token.stream: results.update(batch_results) diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py new file mode 100644 index 0000000000..fae5449bcc --- /dev/null +++ b/synapse/types/storage/__init__.py @@ -0,0 +1,47 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# +# Originally licensed under the Apache License, Version 2.0: +# . 
+# +# [This file includes modifications made by New Vector Limited] +# +# + + +class _BackgroundUpdates: + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2" + INDEX_STREAM_ORDERING2 = "index_stream_ordering2" + INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url" + INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order" + INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream" + INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts" + REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" + + EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" + EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" + + EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" + + EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" + + SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE = ( + "sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update" + ) + SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE = "sliding_sync_joined_rooms_bg_update" + SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = ( + "sliding_sync_membership_snapshots_bg_update" + ) diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index 61dccc8077..35917505a4 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -34,11 +34,11 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events_bg_updates import ( - _BackgroundUpdates, _resolve_stale_data_in_sliding_sync_joined_rooms_table, _resolve_stale_data_in_sliding_sync_membership_snapshots_table, ) from synapse.types import create_requester +from synapse.types.storage import _BackgroundUpdates from synapse.util import Clock from tests.test_utils.event_injection import create_event From ebad618bf0f4a7cd8adb5c65d6025d320387b492 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89loi=20Rivard?= Date: Wed, 11 Sep 2024 23:01:43 +0200 Subject: [PATCH 241/332] import pydantic objects from the `_pydantic_compat` module (#17667) This PR changes `from pydantic import BaseModel` to `from synapse._pydantic_compat import BaseModel` (as well as `constr`, `conbytes`, `conint`, `confloat`). It allows `check_pydantic_models.py` to mock those pydantic objects only in the synapse module, and not interfere with pydantic objects in external dependencies. This should solve the CI problems for #17144, which breaks because `check_pydantic_models.py` patches pydantic models from [scim2-models](https://scim2-models.readthedocs.io/). /cc @DMRobertson @gotmax23 fixes #17659 ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. 
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/17667.misc | 5 ++ scripts-dev/check_pydantic_models.py | 50 +++++------------ synapse/_pydantic_compat.py | 64 +++++++++++++++++++++- synapse/config/_util.py | 10 +--- synapse/config/workers.py | 16 +++--- synapse/events/validator.py | 10 +--- synapse/http/servlet.py | 16 +++--- synapse/rest/admin/users.py | 7 +-- synapse/rest/client/account.py | 8 +-- synapse/rest/client/devices.py | 8 +-- synapse/rest/client/directory.py | 8 +-- synapse/rest/client/reporting.py | 6 +- synapse/rest/key/v2/remote_key_resource.py | 8 +-- synapse/storage/background_updates.py | 7 +-- synapse/types/handlers/sliding_sync.py | 13 ++--- synapse/types/rest/__init__.py | 9 +-- synapse/types/rest/client/__init__.py | 34 ++++-------- tests/rest/client/test_models.py | 8 +-- 18 files changed, 126 insertions(+), 161 deletions(-) create mode 100644 changelog.d/17667.misc diff --git a/changelog.d/17667.misc b/changelog.d/17667.misc new file mode 100644 index 0000000000..6526f283bc --- /dev/null +++ b/changelog.d/17667.misc @@ -0,0 +1,5 @@ +Import pydantic objects from the `_pydantic_compat` module. + +This allows `check_pydantic_models.py` to mock those pydantic objects +only in the synapse module, and not interfere with pydantic objects in +external dependencies. diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py index 26d667aba0..5eb1f0a9df 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py @@ -45,7 +45,6 @@ import traceback import unittest.mock from contextlib import contextmanager from typing import ( - TYPE_CHECKING, Any, Callable, Dict, @@ -57,30 +56,17 @@ from typing import ( ) from parameterized import parameterized - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import ( - BaseModel as PydanticBaseModel, - conbytes, - confloat, - conint, - constr, - ) - from pydantic.v1.typing import get_args -else: - from pydantic import ( - BaseModel as PydanticBaseModel, - conbytes, - confloat, - conint, - constr, - ) - from pydantic.typing import get_args - from typing_extensions import ParamSpec +from synapse._pydantic_compat import ( + BaseModel as PydanticBaseModel, + conbytes, + confloat, + conint, + constr, + get_args, +) + logger = logging.getLogger(__name__) CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [ @@ -183,22 +169,16 @@ def monkeypatch_pydantic() -> Generator[None, None, None]: # Most Synapse code ought to import the patched objects directly from # `pydantic`. But we also patch their containing modules `pydantic.main` and # `pydantic.types` for completeness. 
- patch_basemodel1 = unittest.mock.patch( - "pydantic.BaseModel", new=PatchedBaseModel + patch_basemodel = unittest.mock.patch( + "synapse._pydantic_compat.BaseModel", new=PatchedBaseModel ) - patch_basemodel2 = unittest.mock.patch( - "pydantic.main.BaseModel", new=PatchedBaseModel - ) - patches.enter_context(patch_basemodel1) - patches.enter_context(patch_basemodel2) + patches.enter_context(patch_basemodel) for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: wrapper: Callable = make_wrapper(factory) - patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper) - patch2 = unittest.mock.patch( - f"pydantic.types.{factory.__name__}", new=wrapper + patch = unittest.mock.patch( + f"synapse._pydantic_compat.{factory.__name__}", new=wrapper ) - patches.enter_context(patch1) - patches.enter_context(patch2) + patches.enter_context(patch) yield diff --git a/synapse/_pydantic_compat.py b/synapse/_pydantic_compat.py index a6ceeb04d2..f0eedf5c6d 100644 --- a/synapse/_pydantic_compat.py +++ b/synapse/_pydantic_compat.py @@ -19,6 +19,8 @@ # # +from typing import TYPE_CHECKING + from packaging.version import Version try: @@ -30,4 +32,64 @@ except ImportError: HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2 -__all__ = ("HAS_PYDANTIC_V2",) +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import ( + BaseModel, + Extra, + Field, + MissingError, + PydanticValueError, + StrictBool, + StrictInt, + StrictStr, + ValidationError, + conbytes, + confloat, + conint, + constr, + parse_obj_as, + validator, + ) + from pydantic.v1.error_wrappers import ErrorWrapper + from pydantic.v1.typing import get_args +else: + from pydantic import ( + BaseModel, + Extra, + Field, + MissingError, + PydanticValueError, + StrictBool, + StrictInt, + StrictStr, + ValidationError, + conbytes, + confloat, + conint, + constr, + parse_obj_as, + validator, + ) + from pydantic.error_wrappers import ErrorWrapper + from pydantic.typing import get_args + +__all__ = ( + "HAS_PYDANTIC_V2", + "BaseModel", + "constr", + "conbytes", + "conint", + "confloat", + "ErrorWrapper", + "Extra", + "Field", + "get_args", + "MissingError", + "parse_obj_as", + "PydanticValueError", + "StrictBool", + "StrictInt", + "StrictStr", + "ValidationError", + "validator", +) diff --git a/synapse/config/_util.py b/synapse/config/_util.py index 32b906a1ec..731b60a840 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -18,17 +18,11 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar +from typing import Any, Dict, Type, TypeVar import jsonschema -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, ValidationError, parse_obj_as -else: - from pydantic import BaseModel, ValidationError, parse_obj_as - +from synapse._pydantic_compat import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError from synapse.types import JsonDict, StrSequence diff --git a/synapse/config/workers.py b/synapse/config/workers.py index b013ffa354..ab896be307 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -22,17 +22,17 @@ import argparse import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Union import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, Extra, StrictBool, 
StrictInt, StrictStr -else: - from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr - +from synapse._pydantic_compat import ( + BaseModel, + Extra, + StrictBool, + StrictInt, + StrictStr, +) from synapse.config._base import ( Config, ConfigError, diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 73b63b77f2..8aa8d7e017 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -19,17 +19,11 @@ # # import collections.abc -from typing import TYPE_CHECKING, List, Type, Union, cast +from typing import List, Type, Union, cast import jsonschema -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Field, StrictBool, StrictStr -else: - from pydantic import Field, StrictBool, StrictStr - +from synapse._pydantic_compat import Field, StrictBool, StrictStr from synapse.api.constants import ( MAX_ALIAS_LENGTH, EventContentFields, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 08b8ff7afd..0330f1c878 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -37,19 +37,17 @@ from typing import ( overload, ) -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, MissingError, PydanticValueError, ValidationError - from pydantic.v1.error_wrappers import ErrorWrapper -else: - from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError - from pydantic.error_wrappers import ErrorWrapper - from typing_extensions import Literal from twisted.web.server import Request +from synapse._pydantic_compat import ( + BaseModel, + ErrorWrapper, + MissingError, + PydanticValueError, + ValidationError, +) from synapse.api.errors import Codes, SynapseError from synapse.http import redact_uri from synapse.http.server import HttpServer diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index ad515bd5a3..076994b87e 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -27,7 +27,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import StrictBool from synapse.api.constants import Direction, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -56,11 +56,6 @@ from synapse.types.rest import RequestBodyModel if TYPE_CHECKING: from synapse.server import HomeServer -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictBool -else: - from pydantic import StrictBool - logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 8daa449f9e..32fa7b4ec4 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -24,18 +24,12 @@ import random from typing import TYPE_CHECKING, List, Optional, Tuple from urllib.parse import urlparse -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictBool, StrictStr, constr -else: - from pydantic import StrictBool, StrictStr, constr - import attr from typing_extensions import Literal from twisted.web.server import Request +from synapse._pydantic_compat import StrictBool, StrictStr, constr from synapse.api.constants import LoginType from synapse.api.errors import ( Codes, diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 
8313d687b7..6a45a5d130 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -24,13 +24,7 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra, StrictStr -else: - from pydantic import Extra, StrictStr - +from synapse._pydantic_compat import Extra, StrictStr from synapse.api import errors from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError from synapse.handlers.device import DeviceHandler diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 11fdd0f7c6..98ba5c4c2a 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -22,17 +22,11 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictStr -else: - from pydantic import StrictStr - from typing_extensions import Literal from twisted.web.server import Request +from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py index 4eee53e5a8..97bd5d8c02 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py @@ -23,7 +23,7 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -40,10 +40,6 @@ from ._base import client_patterns if TYPE_CHECKING: from synapse.server import HomeServer -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictStr -else: - from pydantic import StrictStr logger = logging.getLogger(__name__) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 3c2028a2ad..fea0b9706d 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -23,17 +23,11 @@ import logging import re from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra, StrictInt, StrictStr -else: - from pydantic import Extra, StrictInt, StrictStr - from signedjson.sign import sign_json from twisted.web.server import Request +from synapse._pydantic_compat import Extra, StrictInt, StrictStr from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import HttpServer from synapse.http.servlet import ( diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index efe4238036..1194b58ffb 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -40,7 +40,7 @@ from typing import ( import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import BaseModel from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine from synapse.storage.types import 
Connection, Cursor @@ -49,11 +49,6 @@ from synapse.util import Clock, json_encoder from . import engines -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel -else: - from pydantic import BaseModel - if TYPE_CHECKING: from synapse.server import HomeServer from synapse.storage.database import ( diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index e1b2af7a03..48badeacda 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -37,23 +37,20 @@ from typing import ( import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import Extra from synapse.api.constants import EventTypes -from synapse.types import MultiWriterStreamToken, RoomStreamToken, StrCollection, UserID - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra -else: - from pydantic import Extra - from synapse.events import EventBase from synapse.types import ( DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, Requester, + RoomStreamToken, SlidingSyncStreamToken, + StrCollection, StreamToken, + UserID, ) from synapse.types.rest.client import SlidingSyncBody diff --git a/synapse/types/rest/__init__.py b/synapse/types/rest/__init__.py index 2b6f5ed35a..183831e79a 100644 --- a/synapse/types/rest/__init__.py +++ b/synapse/types/rest/__init__.py @@ -18,14 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, Extra -else: - from pydantic import BaseModel, Extra +from synapse._pydantic_compat import BaseModel, Extra class RequestBodyModel(BaseModel): diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 9f6fb087c1..c739bd16b0 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -20,29 +20,15 @@ # from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import ( - Extra, - StrictBool, - StrictInt, - StrictStr, - conint, - constr, - validator, - ) -else: - from pydantic import ( - Extra, - StrictBool, - StrictInt, - StrictStr, - conint, - constr, - validator, - ) - +from synapse._pydantic_compat import ( + Extra, + StrictBool, + StrictInt, + StrictStr, + conint, + constr, + validator, +) from synapse.types.rest import RequestBodyModel from synapse.util.threepids import validate_email @@ -384,7 +370,7 @@ class SlidingSyncBody(RequestBodyModel): receipts: Optional[ReceiptsExtension] = None typing: Optional[TypingExtension] = None - conn_id: Optional[str] + conn_id: Optional[StrictStr] # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py index f8a56c80ca..f14585ccac 100644 --- a/tests/rest/client/test_models.py +++ b/tests/rest/client/test_models.py @@ -19,18 +19,12 @@ # # import unittest as stdlib_unittest -from typing import TYPE_CHECKING from typing_extensions import Literal -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import BaseModel, ValidationError from synapse.types.rest.client import EmailRequestTokenBody -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, 
ValidationError -else: - from pydantic import BaseModel, ValidationError - class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase): class Model(BaseModel): From 4c66a7cbedc718f1d85f776203939dbf4f32b88b Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 12 Sep 2024 11:10:31 +0100 Subject: [PATCH 242/332] 1.115.0rc2 --- CHANGES.md | 10 ++++++++++ changelog.d/17652.misc | 1 - changelog.d/17683.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 17 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/17652.misc delete mode 100644 changelog.d/17683.misc diff --git a/CHANGES.md b/CHANGES.md index b86a4c310d..961169b1ba 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +# Synapse 1.115.0rc2 (2024-09-12) + +### Internal Changes + +- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17652](https://github.com/element-hq/synapse/issues/17652)) +- Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. ([\#17683](https://github.com/element-hq/synapse/issues/17683)) + + + + # Synapse 1.115.0rc1 (2024-09-10) ### Features diff --git a/changelog.d/17652.misc b/changelog.d/17652.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17652.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17683.misc b/changelog.d/17683.misc deleted file mode 100644 index 11a10ff854..0000000000 --- a/changelog.d/17683.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. diff --git a/debian/changelog b/debian/changelog index 51b082205d..fe6ab67414 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.115.0~rc2) stable; urgency=medium + + * New Synapse release 1.115.0rc2. + + -- Synapse Packaging team <packages@matrix.org>  Thu, 12 Sep 2024 11:10:15 +0100 + matrix-synapse-py3 (1.115.0~rc1) stable; urgency=medium * New Synapse release 1.115.0rc1. diff --git a/pyproject.toml b/pyproject.toml index bd139e2834..7675d063b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.115.0rc1" +version = "1.115.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "AGPL-3.0-or-later" From 9b83fb7c166579d8a700b5dfc141006ec0530954 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 12 Sep 2024 15:27:03 -0500 Subject: [PATCH 243/332] Sliding Sync: Move filters tests to rest layer (#17703) Move filters tests to the rest layer in order to test both the new path (with the sliding sync tables) and the fallback path that Sliding Sync can use. Also found and fixed a bug in the new path, which had gone unnoticed because that path was not being tested: we now take `has_known_state` into account when filtering. Spawning from https://github.com/element-hq/synapse/pull/17662#discussion_r1755574791.
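To illustrate the fix, a minimal sketch of the new filtering rule (the names `_RoomEntry` and `filter_is_encrypted` are invented for illustration; this is not the actual Synapse code). Rooms whose state cannot be determined, such as remote invites that carry no stripped state, are now excluded from state-based filters instead of being matched against stored default values:

    from dataclasses import dataclass
    from typing import Dict, Optional

    @dataclass
    class _RoomEntry:
        # False for e.g. a remote invite with no stripped state, where we
        # cannot tell whether the room is encrypted or what type it is.
        has_known_state: bool
        is_encrypted: bool
        room_type: Optional[str]

    def filter_is_encrypted(
        rooms: Dict[str, _RoomEntry], want_encrypted: bool
    ) -> Dict[str, _RoomEntry]:
        return {
            room_id: entry
            for room_id, entry in rooms.items()
            # Drop rooms whose encryption state we cannot determine...
            if entry.has_known_state
            # ...and only then compare against the requested filter value.
            and entry.is_encrypted == want_encrypted
        }

    rooms = {
        "!encrypted:server": _RoomEntry(
            has_known_state=True, is_encrypted=True, room_type=None
        ),
        "!invite:remote": _RoomEntry(
            has_known_state=False, is_encrypted=False, room_type=None
        ),
    }
    # Without the guard, the remote invite would have matched
    # `is_encrypted=False` (the stored default); with it, the room is
    # excluded from the filter entirely.
    assert filter_is_encrypted(rooms, want_encrypted=False) == {}

The actual change below applies the same `has_known_state` guard to the `is_encrypted`, `room_types`, and `not_room_types` filters in `room_lists.py`.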
This should have been done when we started using the new sliding sync tables in https://github.com/element-hq/synapse/pull/17630 --- changelog.d/17703.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 17 +- synapse/rest/client/sync.py | 2 +- synapse/storage/databases/main/roommember.py | 6 +- synapse/storage/roommember.py | 1 + tests/handlers/test_sliding_sync.py | 1385 +------------- .../client/sliding_sync/test_lists_filters.py | 1681 +++++++++++++++++ .../client/sliding_sync/test_rooms_meta.py | 58 + .../client/sliding_sync/test_sliding_sync.py | 572 ++---- 9 files changed, 1928 insertions(+), 1795 deletions(-) create mode 100644 changelog.d/17703.misc create mode 100644 tests/rest/client/sliding_sync/test_lists_filters.py diff --git a/changelog.d/17703.misc b/changelog.d/17703.misc new file mode 100644 index 0000000000..c5b0ea438a --- /dev/null +++ b/changelog.d/17703.misc @@ -0,0 +1 @@ +Refactor sliding sync filter unit tests so the sliding sync API has better test coverage. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 652d05dbe9..50f0786374 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -246,6 +246,7 @@ class SlidingSyncRoomLists: event_pos=change.event_pos, room_version_id=change.room_version_id, # We keep the current state of the room though + has_known_state=existing_room.has_known_state, room_type=existing_room.room_type, is_encrypted=existing_room.is_encrypted, ) @@ -270,6 +271,7 @@ class SlidingSyncRoomLists: event_id=change.event_id, event_pos=change.event_pos, room_version_id=change.room_version_id, + has_known_state=True, room_type=room_type, is_encrypted=is_encrypted, ) @@ -305,6 +307,7 @@ class SlidingSyncRoomLists: event_id=None, event_pos=newly_left_room_map[room_id], room_version_id=await self.store.get_room_version_id(room_id), + has_known_state=True, room_type=room_type, is_encrypted=is_encrypted, ) @@ -1630,12 +1633,14 @@ class SlidingSyncRoomLists: and room_type not in filters.room_types ): filtered_room_id_set.remove(room_id) + continue if ( filters.not_room_types is not None and room_type in filters.not_room_types ): filtered_room_id_set.remove(room_id) + continue if filters.room_name_like is not None: with start_active_span("filters.room_name_like"): @@ -1705,7 +1710,10 @@ class SlidingSyncRoomLists: filtered_room_id_set = { room_id for room_id in filtered_room_id_set - if sync_room_map[room_id].is_encrypted == filters.is_encrypted + # Remove rooms if we can't figure out what the encryption status is + if sync_room_map[room_id].has_known_state + # Or remove if it doesn't match the filter + and sync_room_map[room_id].is_encrypted == filters.is_encrypted } # Filter for rooms that the user has been invited to @@ -1734,6 +1742,11 @@ class SlidingSyncRoomLists: # Make a copy so we don't run into an error: `Set changed size during # iteration`, when we filter out and remove items for room_id in filtered_room_id_set.copy(): + # Remove rooms if we can't figure out what room type it is + if not sync_room_map[room_id].has_known_state: + filtered_room_id_set.remove(room_id) + continue + room_type = sync_room_map[room_id].room_type if ( @@ -1741,12 +1754,14 @@ class SlidingSyncRoomLists: and room_type not in filters.room_types ): filtered_room_id_set.remove(room_id) + continue if ( filters.not_room_types is not None and room_type in filters.not_room_types ): filtered_room_id_set.remove(room_id) + continue if filters.room_name_like is not None: 
with start_active_span("filters.room_name_like"): diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 9e2bf98189..364cf40153 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1044,7 +1044,7 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["heroes"] = serialized_heroes # We should only include the `initial` key if it's `True` to save bandwidth. - # The absense of this flag means `False`. + # The absence of this flag means `False`. if room_result.initial: serialized_rooms[room_id]["initial"] = room_result.initial diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 8bfa6254f3..e321a1add2 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1420,6 +1420,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): SELECT m.room_id, m.sender, m.membership, m.membership_event_id, r.room_version, m.event_instance_name, m.event_stream_ordering, + m.has_known_state, COALESCE(j.room_type, m.room_type), COALESCE(j.is_encrypted, m.is_encrypted) FROM sliding_sync_membership_snapshots AS m @@ -1437,8 +1438,9 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): event_id=row[3], room_version_id=row[4], event_pos=PersistedEventPosition(row[5], row[6]), - room_type=row[7], - is_encrypted=bool(row[8]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=bool(row[9]), ) for row in txn } diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index af71c01c17..9dc6c395e8 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -48,6 +48,7 @@ class RoomsForUserSlidingSync: event_pos: PersistedEventPosition room_version_id: str + has_known_state: bool room_type: Optional[str] is_encrypted: bool diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 7511a5b00a..e2c7a94ce2 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,7 +18,7 @@ # # import logging -from typing import AbstractSet, Dict, List, Optional, Tuple +from typing import AbstractSet, Dict, Optional, Tuple from unittest.mock import patch from parameterized import parameterized @@ -26,16 +26,11 @@ from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import ( - AccountDataTypes, - EventContentFields, EventTypes, JoinRules, Membership, - RoomTypes, ) from synapse.api.room_versions import RoomVersions -from synapse.events import StrippedStateEvent, make_event_from_dict -from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import ( RoomsForUserType, RoomSyncConfig, @@ -2984,1384 +2979,6 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.assertTrue(room_id1 in newly_left) -class FilterRoomsTestCase(HomeserverTestCase): - """ - Tests Sliding Sync handler `filter_rooms()` to make sure it includes/excludes rooms - correctly. 
- """ - - servlets = [ - admin.register_servlets, - knock.register_servlets, - login.register_servlets, - room.register_servlets, - ] - - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.sliding_sync_handler = self.hs.get_sliding_sync_handler() - self.store = self.hs.get_datastores().main - self.event_sources = hs.get_event_sources() - - def _get_sync_room_ids_for_user( - self, - user: UserID, - to_token: StreamToken, - from_token: Optional[StreamToken], - ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: - """ - Get the rooms the user should be syncing with - """ - room_membership_for_user_map, newly_joined, newly_left = self.get_success( - self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( - user=user, - from_token=from_token, - to_token=to_token, - ) - ) - filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( - user=user, - room_membership_for_user_map=room_membership_for_user_map, - newly_left_room_ids=newly_left, - ) - ) - - return filtered_sync_room_map, newly_joined, newly_left - - def _create_dm_room( - self, - inviter_user_id: str, - inviter_tok: str, - invitee_user_id: str, - invitee_tok: str, - ) -> str: - """ - Helper to create a DM room as the "inviter" and invite the "invitee" user to the room. The - "invitee" user also will join the room. The `m.direct` account data will be set - for both users. - """ - - # Create a room and send an invite the other user - room_id = self.helper.create_room_as( - inviter_user_id, - is_public=False, - tok=inviter_tok, - ) - self.helper.invite( - room_id, - src=inviter_user_id, - targ=invitee_user_id, - tok=inviter_tok, - extra_data={"is_direct": True}, - ) - # Person that was invited joins the room - self.helper.join(room_id, invitee_user_id, tok=invitee_tok) - - # Mimic the client setting the room as a direct message in the global account - # data - self.get_success( - self.store.add_account_data_for_user( - invitee_user_id, - AccountDataTypes.DIRECT, - {inviter_user_id: [room_id]}, - ) - ) - self.get_success( - self.store.add_account_data_for_user( - inviter_user_id, - AccountDataTypes.DIRECT, - {invitee_user_id: [room_id]}, - ) - ) - - return room_id - - _remote_invite_count: int = 0 - - def _create_remote_invite_room_for_user( - self, - invitee_user_id: str, - unsigned_invite_room_state: Optional[List[StrippedStateEvent]], - ) -> str: - """ - Create a fake invite for a remote room and persist it. - - We don't have any state for these kind of rooms and can only rely on the - stripped state included in the unsigned portion of the invite event to identify - the room. - - Args: - invitee_user_id: The person being invited - unsigned_invite_room_state: List of stripped state events to assist the - receiver in identifying the room. 
- - Returns: - The room ID of the remote invite room - """ - invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" - - invite_event_dict = { - "room_id": invite_room_id, - "sender": "@inviter:remote_server", - "state_key": invitee_user_id, - "depth": 1, - "origin_server_ts": 1, - "type": EventTypes.Member, - "content": {"membership": Membership.INVITE}, - "auth_events": [], - "prev_events": [], - } - if unsigned_invite_room_state is not None: - serialized_stripped_state_events = [] - for stripped_event in unsigned_invite_room_state: - serialized_stripped_state_events.append( - { - "type": stripped_event.type, - "state_key": stripped_event.state_key, - "sender": stripped_event.sender, - "content": stripped_event.content, - } - ) - - invite_event_dict["unsigned"] = { - "invite_room_state": serialized_stripped_state_events - } - - invite_event = make_event_from_dict( - invite_event_dict, - room_version=RoomVersions.V10, - ) - invite_event.internal_metadata.outlier = True - invite_event.internal_metadata.out_of_band_membership = True - - self.get_success( - self.store.maybe_store_room_on_outlier_membership( - room_id=invite_room_id, room_version=invite_event.room_version - ) - ) - context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None - self.get_success(persist_controller.persist_event(invite_event, context)) - - self._remote_invite_count += 1 - - return invite_room_id - - def test_filter_dm_rooms(self) -> None: - """ - Test `filter.is_dm` for DM rooms - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a DM room - dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - dm_room_ids = self.get_success( - self.sliding_sync_handler.room_lists._get_dm_rooms_for_user(user1_id) - ) - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_dm=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_dm=True, - ), - after_rooms_token, - dm_room_ids=dm_room_ids, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {dm_room_id}) - - # Try with `is_dm=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_dm=False, - ), - after_rooms_token, - dm_room_ids=dm_room_ids, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_rooms(self) -> None: - """ - Test `filter.is_encrypted` for encrypted rooms - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = 
self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_server_left_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that everyone has left. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Leave the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - # Leave the room - self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_server_left_room2(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that everyone has - left. 
- - There is still someone local who is invited to the rooms but that doesn't affect - whether the server is participating in the room (users need to be joined). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - _user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Invite user2 - self.helper.invite(room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - # Invite user2 - self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_after_we_left(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that was encrypted - after we left the room (make sure we don't just use the current state) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - # Leave the room - self.helper.join(room_id, user1_id, tok=user1_tok) - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a room that will be encrypted - encrypted_after_we_left_room_id = self.helper.create_room_as( - user2_id, tok=user2_tok - ) - # Leave the room - self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) - self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) - - # Encrypt the room after we've left - self.helper.send_state( - encrypted_after_we_left_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user2_tok, - ) - - after_rooms_token = 
self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # Even though we left the room before it was encrypted, we still see it because - # someone else on our server is still participating in the room and we "leak" - # the current state to the left user. But we consider the room encryption status - # to not be a secret given it's often set at the start of the room and it's one - # of the stripped state events that is normally handed out. - self.assertEqual( - truthy_filtered_room_map.keys(), {encrypted_after_we_left_room_id} - ) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # Even though we left the room before it was encrypted... (see comment above) - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_room_no_stripped_state(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - room without any `unsigned.invite_room_state` (stripped state). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room without any `unsigned.invite_room_state` - _remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, None - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out whether - # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). 
- self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out whether - # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_encrypted_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - encrypted room with some `unsigned.invite_room_state` (stripped state). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # indicating that the room is encrypted. - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - }, - ), - StrippedStateEvent( - type=EventTypes.RoomEncryption, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", - }, - ), - ], - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should appear here because it is encrypted - # according to the stripped state - self.assertEqual( - truthy_filtered_room_map.keys(), {encrypted_room_id, remote_invite_room_id} - ) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear here because it is encrypted - # according to the stripped state - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_unencrypted_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - unencrypted room with some `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # but don't set any room encryption event. - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - }, - ), - # No room encryption event - ], - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear here because it is unencrypted - # according to the stripped state - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should appear because it is unencrypted according to - # the stripped state - self.assertEqual( - falsy_filtered_room_map.keys(), {room_id, remote_invite_room_id} - ) - - def test_filter_invite_rooms(self) -> None: - """ - Test `filter.is_invite` for rooms that the user has been invited to - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_invite=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_invite=True, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {invite_room_id}) - - # Try with 
`is_invite=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_invite=False, - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_room_types(self) -> None: - """ - Test `filter.room_types` for different room types - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - # Create an arbitrarily typed room - foo_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": { - EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" - } - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - # Try finding normal rooms and spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None, RoomTypes.SPACE] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id, space_room_id}) - - # Try finding an arbitrary room type - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=["org.matrix.foobarbaz"] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {foo_room_id}) - - def test_filter_not_room_types(self) -> None: - """ - Test `filter.not_room_types` for different room types - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - # Create an arbitrarily typed room - foo_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": { - EventContentFields.ROOM_TYPE: 
"org.matrix.foobarbaz" - } - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding *NOT* normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id, foo_room_id}) - - # Try finding *NOT* spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - not_room_types=[RoomTypes.SPACE] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id, foo_room_id}) - - # Try finding *NOT* normal rooms or spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - not_room_types=[None, RoomTypes.SPACE] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {foo_room_id}) - - # Test how it behaves when we have both `room_types` and `not_room_types`. - # `not_room_types` should win. - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None], not_room_types=[None] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # Nothing matches because nothing is both a normal room and not a normal room - self.assertEqual(filtered_room_map.keys(), set()) - - # Test how it behaves when we have both `room_types` and `not_room_types`. - # `not_room_types` should win. - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None, RoomTypes.SPACE], not_room_types=[None] - ), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_server_left_room(self) -> None: - """ - Test that we can apply a `filter.room_types` against a room that everyone has left. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Leave the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - # Leave the room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_server_left_room2(self) -> None: - """ - Test that we can apply a `filter.room_types` against a room that everyone has left. - - There is still someone local who is invited to the rooms but that doesn't affect - whether the server is participating in the room (users need to be joined). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - _user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Invite user2 - self.helper.invite(room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - # Invite user2 - self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_with_remote_invite_room_no_stripped_state(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - room without any `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room without any `unsigned.invite_room_state` - _remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, None - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out what - # room type it is (no stripped state, `unsigned.invite_room_state`) - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out what - # room type it is (no stripped state, `unsigned.invite_room_state`) - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_with_remote_invite_space(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - to a space room with some `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` indicating - # that it is a space room - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - # Specify that it is a space room - EventContentFields.ROOM_TYPE: RoomTypes.SPACE, - }, - ), - ], - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear here because it is a space room - # according to the stripped state - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should appear here because it is a space room - # according to the stripped state - self.assertEqual( - filtered_room_map.keys(), {space_room_id, remote_invite_room_id} - ) - - def test_filter_room_types_with_remote_invite_normal_room(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - to a normal room with some `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # but the create event does not specify a room type (normal room) - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - # No room type means this is a normal room - }, - ), - ], - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should appear here because it is a normal room - # according to the stripped state (no room type) - self.assertEqual(filtered_room_map.keys(), {room_id, remote_invite_room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.room_lists.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - dm_room_ids=set(), - ) - ) - - # `remote_invite_room_id` should not appear here because it is a normal room - # according to the stripped state (no room type) - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - class SortRoomsTestCase(HomeserverTestCase): """ Tests Sliding Sync handler `sort_rooms()` to make sure it sorts/orders rooms diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py new file mode 100644 index 0000000000..8df35f5f65 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_lists_filters.py @@ -0,0 +1,1681 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . 
+# +import logging + +from parameterized import parameterized_class + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import ( + EventContentFields, + EventTypes, + RoomTypes, +) +from synapse.api.room_versions import RoomVersions +from synapse.events import StrippedStateEvent +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase + +logger = logging.getLogger(__name__) + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) +class SlidingSyncFiltersTestCase(SlidingSyncBase): + """ + Test `filters` in the Sliding Sync API to make sure it includes/excludes rooms + correctly. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() + + super().prepare(reactor, clock, hs) + + def test_multiple_filters_and_multiple_lists(self) -> None: + """ + Test that filters apply to `lists` in various scenarios. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a DM room + joined_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=True, + ) + invited_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=False, + ) + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + # Absence of filters does not imply "False" values + "all": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {}, + }, + # Test single truthy filter + "dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": True}, + }, + # Test single falsy filter + "non-dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False}, + }, + # Test how multiple filters should stack (AND'd together) + "room-invites": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False, "is_invite": True}, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the lists we requested + self.assertIncludes( + response_body["lists"].keys(), + {"all", "dms", "non-dms", "room-invites"}, + exact=True, + ) + + # Make sure the lists have the correct rooms + self.assertIncludes( + set(response_body["lists"]["all"]["ops"][0]["room_ids"]), + { + invite_room_id, + room_id, + invited_dm_room_id, + joined_dm_room_id, + }, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["dms"]["ops"][0]["room_ids"]), + {invited_dm_room_id, joined_dm_room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["non-dms"]["ops"][0]["room_ids"]), + {invite_room_id, room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["room-invites"]["ops"][0]["room_ids"]), + {invite_room_id}, + exact=True, + ) + + def test_filters_regardless_of_membership_server_left_room(self) -> None: + """ + Test that filters apply to rooms regardless of membership. We're also + compounding the problem by having all of the local users leave the room causing + our server to leave the room. + + We want to make sure that if someone is filtering rooms, and leaves, you still + get that final update down sync that you left. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create an encrypted space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + self.helper.send_state( + space_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + # Make an initial Sliding Sync request + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make sure the response has the lists we requested + self.assertListEqual( + list(response_body["lists"].keys()), + ["all-list", "foo-list"], + response_body["lists"].keys(), + ) + + # Make sure the lists have the correct rooms + self.assertListEqual( + list(response_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + + # Everyone leaves the encrypted space room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make an incremental Sliding Sync request + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Make sure the lists have the correct rooms even though we `newly_left` + self.assertListEqual( + list(response_body["lists"]["all-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id, room_id], + } + ], + ) + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 99], + "room_ids": [space_room_id], + } + ], + ) + + def test_filters_is_dm(self) -> None: + """ + Test `filter.is_dm` for DM rooms + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a DM room + dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + ) + + # Try with `is_dm=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_dm": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, 
tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {dm_room_id},
+            exact=True,
+        )
+
+        # Try with `is_dm=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_dm": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted(self) -> None:
+        """
+        Test `filters.is_encrypted` for encrypted rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the encrypted room should be returned
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the unencrypted room should be returned
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_server_left_room(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` against a room that everyone has left.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Leave the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_server_left_room2(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` against a room that everyone has + left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Invite user2 + self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_after_we_left(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` against a room that was encrypted + after we left the room (make sure we don't just use the current state) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Leave the room + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a room that will be encrypted + encrypted_after_we_left_room_id = self.helper.create_room_as( + user2_id, tok=user2_tok + ) + # Leave the room + self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + + # Encrypt the room after we've left + self.helper.send_state( + encrypted_after_we_left_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + 
"required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + if self.use_new_tables: + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + else: + # Even though we left the room before it was encrypted, we still see it because + # someone else on our server is still participating in the room and we "leak" + # the current state to the left user. But we consider the room encryption status + # to not be a secret given it's often set at the start of the room and it's one + # of the stripped state events that is normally handed out. + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_after_we_left_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + if self.use_new_tables: + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, encrypted_after_we_left_room_id}, + exact=True, + ) + else: + # Even though we left the room before it was encrypted... (see comment above) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_room_no_stripped_state( + self, + ) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). 
+ self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_encrypted_room(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + encrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is encrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id, remote_invite_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is encrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_unencrypted_room(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + unencrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but don't set any room encryption event. 
+ remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + # No room encryption event + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is unencrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear because it is unencrypted according to + # the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, remote_invite_room_id}, + exact=True, + ) + + def test_filters_is_encrypted_updated(self) -> None: + """ + Make sure we get rooms if the encrypted room status is updated for a joined room + (`filters.is_encrypted`) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # No rooms are encrypted yet + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Update the encryption status + self.helper.send_state( + room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # We should see the room now because it's encrypted + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_invite_rooms(self) -> None: + """ + Test `filters.is_invite` for rooms that the user has been invited to + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + 
self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + # Try with `is_invite=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_invite": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {invite_room_id}, + exact=True, + ) + + # Try with `is_invite=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_invite": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_room_types(self) -> None: + """ + Test `filters.room_types` for different room types + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Create an arbitrarily typed room + foo_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": { + EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" + } + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + # Try finding normal rooms and spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None, RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, space_room_id}, + exact=True, + ) + + # Try finding an arbitrary room type + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": ["org.matrix.foobarbaz"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id}, + exact=True, + ) + + def test_filters_not_room_types(self) -> None: + """ + Test `filters.not_room_types` for different room types + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + 
space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Create an arbitrarily typed room + foo_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": { + EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" + } + }, + ) + + # Try finding *NOT* normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id, foo_room_id}, + exact=True, + ) + + # Try finding *NOT* spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, foo_room_id}, + exact=True, + ) + + # Try finding *NOT* normal rooms or spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [None, RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id}, + exact=True, + ) + + # Test how it behaves when we have both `room_types` and `not_room_types`. + # `not_room_types` should win. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # Nothing matches because nothing is both a normal room and not a normal room + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Test how it behaves when we have both `room_types` and `not_room_types`. + # `not_room_types` should win. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None, RoomTypes.SPACE], + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_room_types_server_left_room(self) -> None: + """ + Test that we can apply a `filters.room_types` against a room that everyone has left. 
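+
+        (Note on `test_filters_not_room_types` above: when `room_types` and
+        `not_room_types` are both supplied, `not_room_types` wins. A
+        hypothetical sketch of that predicate, not Synapse's actual
+        implementation:
+
+            def matches(room_type, room_types=None, not_room_types=None):
+                if not_room_types is not None and room_type in not_room_types:
+                    return False  # `not_room_types` always wins
+                if room_types is not None and room_type not in room_types:
+                    return False
+                return True
+        )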
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Leave the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filter_room_types_server_left_room2(self) -> None: + """ + Test that we can apply a `filter.room_types` against a room that everyone has left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Invite user2 + self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_room_no_stripped_state(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_space(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + to a space room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` indicating + # that it is a space room + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # Specify that it is a space room + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is a space room + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is a space room + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id, remote_invite_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_normal_room(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + to a normal room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but the create event does not specify a room type (normal room) + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # No room type means this is a normal room + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is a normal room + # according to the stripped state (no room type) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, remote_invite_room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is a normal room + # according to the stripped state (no room type) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 6dbce7126f..40743d17eb 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -1139,3 +1139,61 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): self.assertEqual( response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream ) + + def test_rooms_meta_is_dm(self) -> None: + """ + Test `rooms` `is_dm` is correctly set for DM rooms. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a DM room + joined_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=True, + ) + invited_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=False, + ) + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Ensure DM's are correctly marked + self.assertDictEqual( + { + room_id: room.get("is_dm") + for room_id, room in response_body["rooms"].items() + }, + { + invite_room_id: None, + room_id: None, + invited_dm_room_id: True, + joined_dm_room_id: True, + }, + ) diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 9e23dbe522..fe35cbb532 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -23,11 +23,12 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( AccountDataTypes, - EventContentFields, EventTypes, - RoomTypes, + Membership, ) -from synapse.events import EventBase +from synapse.api.room_versions import RoomVersions +from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.rest.client import devices, login, receipts, room, sync from synapse.server import HomeServer from synapse.types import ( @@ -141,6 +142,167 @@ class SlidingSyncBase(unittest.HomeserverTestCase): message=str(actual_required_state), ) + def _add_new_dm_to_global_account_data( + self, source_user_id: str, target_user_id: str, target_room_id: str + ) -> None: + """ + Helper to handle inserting a new DM for the source user into global account data + (handles all of the list merging). + + Args: + source_user_id: The user ID of the DM mapping we're going to update + target_user_id: User ID of the person the DM is with + target_room_id: Room ID of the DM + """ + store = self.hs.get_datastores().main + + # Get the current DM map + existing_dm_map = self.get_success( + store.get_global_account_data_by_type_for_user( + source_user_id, AccountDataTypes.DIRECT + ) + ) + # Scrutinize the account data since it has no concrete type. We're just copying + # everything into a known type. It should be a mapping from user ID to a list of + # room IDs. Ignore anything else. 
+ new_dm_map: Dict[str, List[str]] = {} + if isinstance(existing_dm_map, dict): + for user_id, room_ids in existing_dm_map.items(): + if isinstance(user_id, str) and isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + new_dm_map[user_id] = new_dm_map.get(user_id, []) + [ + room_id + ] + + # Add the new DM to the map + new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [ + target_room_id + ] + # Save the DM map to global account data + self.get_success( + store.add_account_data_for_user( + source_user_id, + AccountDataTypes.DIRECT, + new_dm_map, + ) + ) + + def _create_dm_room( + self, + inviter_user_id: str, + inviter_tok: str, + invitee_user_id: str, + invitee_tok: str, + should_join_room: bool = True, + ) -> str: + """ + Helper to create a DM room as the "inviter" and invite the "invitee" user to the + room. The "invitee" user also will join the room. The `m.direct` account data + will be set for both users. + """ + # Create a room and send an invite the other user + room_id = self.helper.create_room_as( + inviter_user_id, + is_public=False, + tok=inviter_tok, + ) + self.helper.invite( + room_id, + src=inviter_user_id, + targ=invitee_user_id, + tok=inviter_tok, + extra_data={"is_direct": True}, + ) + if should_join_room: + # Person that was invited joins the room + self.helper.join(room_id, invitee_user_id, tok=invitee_tok) + + # Mimic the client setting the room as a direct message in the global account + # data for both users. + self._add_new_dm_to_global_account_data( + invitee_user_id, inviter_user_id, room_id + ) + self._add_new_dm_to_global_account_data( + inviter_user_id, invitee_user_id, room_id + ) + + return room_id + + _remote_invite_count: int = 0 + + def _create_remote_invite_room_for_user( + self, + invitee_user_id: str, + unsigned_invite_room_state: Optional[List[StrippedStateEvent]], + ) -> str: + """ + Create a fake invite for a remote room and persist it. + + We don't have any state for these kind of rooms and can only rely on the + stripped state included in the unsigned portion of the invite event to identify + the room. + + Args: + invitee_user_id: The person being invited + unsigned_invite_room_state: List of stripped state events to assist the + receiver in identifying the room. 
+ + Returns: + The room ID of the remote invite room + """ + store = self.hs.get_datastores().main + + invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" + + invite_event_dict = { + "room_id": invite_room_id, + "sender": "@inviter:remote_server", + "state_key": invitee_user_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + } + if unsigned_invite_room_state is not None: + serialized_stripped_state_events = [] + for stripped_event in unsigned_invite_room_state: + serialized_stripped_state_events.append( + { + "type": stripped_event.type, + "state_key": stripped_event.state_key, + "sender": stripped_event.sender, + "content": stripped_event.content, + } + ) + + invite_event_dict["unsigned"] = { + "invite_room_state": serialized_stripped_state_events + } + + invite_event = make_event_from_dict( + invite_event_dict, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.get_success(persist_controller.persist_event(invite_event, context)) + + self._remote_invite_count += 1 + + return invite_room_id + def _bump_notifier_wait_for_events( self, user_id: str, @@ -261,93 +423,6 @@ class SlidingSyncTestCase(SlidingSyncBase): super().prepare(reactor, clock, hs) - def _add_new_dm_to_global_account_data( - self, source_user_id: str, target_user_id: str, target_room_id: str - ) -> None: - """ - Helper to handle inserting a new DM for the source user into global account data - (handles all of the list merging). - - Args: - source_user_id: The user ID of the DM mapping we're going to update - target_user_id: User ID of the person the DM is with - target_room_id: Room ID of the DM - """ - - # Get the current DM map - existing_dm_map = self.get_success( - self.store.get_global_account_data_by_type_for_user( - source_user_id, AccountDataTypes.DIRECT - ) - ) - # Scrutinize the account data since it has no concrete type. We're just copying - # everything into a known type. It should be a mapping from user ID to a list of - # room IDs. Ignore anything else. - new_dm_map: Dict[str, List[str]] = {} - if isinstance(existing_dm_map, dict): - for user_id, room_ids in existing_dm_map.items(): - if isinstance(user_id, str) and isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - new_dm_map[user_id] = new_dm_map.get(user_id, []) + [ - room_id - ] - - # Add the new DM to the map - new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [ - target_room_id - ] - # Save the DM map to global account data - self.get_success( - self.store.add_account_data_for_user( - source_user_id, - AccountDataTypes.DIRECT, - new_dm_map, - ) - ) - - def _create_dm_room( - self, - inviter_user_id: str, - inviter_tok: str, - invitee_user_id: str, - invitee_tok: str, - should_join_room: bool = True, - ) -> str: - """ - Helper to create a DM room as the "inviter" and invite the "invitee" user to the - room. The "invitee" user also will join the room. The `m.direct` account data - will be set for both users. 
- """ - - # Create a room and send an invite the other user - room_id = self.helper.create_room_as( - inviter_user_id, - is_public=False, - tok=inviter_tok, - ) - self.helper.invite( - room_id, - src=inviter_user_id, - targ=invitee_user_id, - tok=inviter_tok, - extra_data={"is_direct": True}, - ) - if should_join_room: - # Person that was invited joins the room - self.helper.join(room_id, invitee_user_id, tok=invitee_tok) - - # Mimic the client setting the room as a direct message in the global account - # data for both users. - self._add_new_dm_to_global_account_data( - invitee_user_id, inviter_user_id, room_id - ) - self._add_new_dm_to_global_account_data( - inviter_user_id, invitee_user_id, room_id - ) - - return room_id - def test_sync_list(self) -> None: """ Test that room IDs show up in the Sliding Sync `lists` @@ -547,323 +622,6 @@ class SlidingSyncTestCase(SlidingSyncBase): # There should be no room sent down. self.assertFalse(channel.json_body["rooms"]) - def test_filter_list(self) -> None: - """ - Test that filters apply to `lists` - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a DM room - joined_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=True, - ) - invited_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=False, - ) - - # Create a normal room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) - - # Make the Sliding Sync request - sync_body = { - "lists": { - # Absense of filters does not imply "False" values - "all": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {}, - }, - # Test single truthy filter - "dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": True}, - }, - # Test single falsy filter - "non-dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False}, - }, - # Test how multiple filters should stack (AND'd together) - "room-invites": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False, "is_invite": True}, - }, - } - } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["all", "dms", "non-dms", "room-invites"], - response_body["lists"].keys(), - ) - - # Make sure the lists have the correct rooms - self.assertListEqual( - list(response_body["lists"]["all"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [ - invite_room_id, - room_id, - invited_dm_room_id, - joined_dm_room_id, - ], - } - ], - list(response_body["lists"]["all"]), - ) - self.assertListEqual( - list(response_body["lists"]["dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invited_dm_room_id, joined_dm_room_id], - } - ], - list(response_body["lists"]["dms"]), - ) - self.assertListEqual( - 
list(response_body["lists"]["non-dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id, room_id], - } - ], - list(response_body["lists"]["non-dms"]), - ) - self.assertListEqual( - list(response_body["lists"]["room-invites"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id], - } - ], - list(response_body["lists"]["room-invites"]), - ) - - # Ensure DM's are correctly marked - self.assertDictEqual( - { - room_id: room.get("is_dm") - for room_id, room in response_body["rooms"].items() - }, - { - invite_room_id: None, - room_id: None, - invited_dm_room_id: True, - joined_dm_room_id: True, - }, - ) - - def test_filter_regardless_of_membership_server_left_room(self) -> None: - """ - Test that filters apply to rooms regardless of membership. We're also - compounding the problem by having all of the local users leave the room causing - our server to leave the room. - - We want to make sure that if someone is filtering rooms, and leaves, you still - get that final update down sync that you left. - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create an encrypted space room - space_room_id = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - self.helper.send_state( - space_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user2_tok, - ) - self.helper.join(space_room_id, user1_id, tok=user1_tok) - - # Make an initial Sliding Sync request - sync_body = { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - } - response_body, from_token = self.do_sync(sync_body, tok=user1_tok) - - # Make sure the response has the lists we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["all-list", "foo-list"], - response_body["lists"].keys(), - ) - - # Make sure the lists have the correct rooms - self.assertListEqual( - list(response_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], - ) - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], - ) - - # Everyone leaves the encrypted space room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - self.helper.leave(space_room_id, user2_id, tok=user2_tok) - - # Make an incremental Sliding Sync request - sync_body = { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - } - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - - # Make sure the lists have the correct rooms even though we `newly_left` - self.assertListEqual( - 
list(response_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], - ) - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], - ) - - def test_filter_is_encrypted_up_to_date(self) -> None: - """ - Make sure we get up-to-date `is_encrypted` status for a joined room - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - sync_body = { - "lists": { - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": { - "is_encrypted": True, - }, - }, - } - } - response_body, from_token = self.do_sync(sync_body, tok=user1_tok) - self.assertIncludes( - set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), - set(), - exact=True, - ) - - # Update the encryption status - self.helper.send_state( - room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - # We should see the room now because it's encrypted - response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - self.assertIncludes( - set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), - {room_id}, - exact=True, - ) - def test_forgotten_up_to_date(self) -> None: """ Make sure we get up-to-date `forgotten` status for rooms From 1cb84aaab5231532105f5a12f01387c9cca27a6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Sep 2024 22:36:16 +0100 Subject: [PATCH 244/332] Sliding Sync: Increase concurrency of sliding sync a bit (#17696) For initial requests a typical page size is 20 rooms, so we may as well do the batching as 20. This should speed up bigger syncs a little bit. --- changelog.d/17696.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 2 +- synapse/handlers/sliding_sync/extensions.py | 10 +++++++++- 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17696.misc diff --git a/changelog.d/17696.misc b/changelog.d/17696.misc new file mode 100644 index 0000000000..a2f1b1f399 --- /dev/null +++ b/changelog.d/17696.misc @@ -0,0 +1 @@ +Speed up sliding sync requests a bit where there are many room changes. 
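The reasoning above — matching the batch size to a typical initial page of 20 rooms — hinges on how `concurrently_execute` (imported from `synapse.util.async_helpers`, as the extensions diff below shows) bounds the number of coroutines in flight. A minimal asyncio-based sketch of those semantics, assuming a semaphore-style implementation (Synapse's real helper is Twisted-based, so this is an illustration, not the actual code):

```python
import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    # Run `func` over every item in `args`, with at most `limit`
    # invocations in flight at any one time.
    semaphore = asyncio.Semaphore(limit)

    async def _run_one(arg: T) -> None:
        async with semaphore:
            await func(arg)

    await asyncio.gather(*(_run_one(arg) for arg in args))


async def main() -> None:
    async def handle_room(room_id: str) -> None:
        await asyncio.sleep(0.01)  # stand-in for per-room work

    # With an initial page size of 20, a limit of 20 lets a whole page's
    # rooms be processed in a single concurrent batch.
    await concurrently_execute(
        handle_room, [f"!r{i}:example.org" for i in range(100)], 20
    )


asyncio.run(main())
```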
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 04493494a6..c3b5bbbf6f 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -267,7 +267,7 @@ class SlidingSyncHandler: if relevant_rooms_to_send_map: with start_active_span("sliding_sync.generate_room_entries"): - await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10) + await concurrently_execute(handle_room, relevant_rooms_to_send_map, 20) extensions = await self.extensions.get_extensions_response( sync_config=sync_config, diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 6f37cc3462..7c2f8a2569 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -38,6 +38,7 @@ from synapse.types.handlers.sliding_sync import ( SlidingSyncConfig, SlidingSyncResult, ) +from synapse.util.async_helpers import concurrently_execute if TYPE_CHECKING: from synapse.server import HomeServer @@ -534,7 +535,10 @@ class SlidingSyncExtensionHandler: # For rooms we've previously sent down, but aren't up to date, we # need to use the from token from the room status. if previously_rooms: - for room_id, receipt_token in previously_rooms.items(): + # Fetch any missing rooms concurrently. + + async def handle_previously_room(room_id: str) -> None: + receipt_token = previously_rooms[room_id] # TODO: Limit the number of receipts we're about to send down # for the room, if its too many we should TODO previously_receipts = ( @@ -546,6 +550,10 @@ class SlidingSyncExtensionHandler: ) fetched_receipts.extend(previously_receipts) + await concurrently_execute( + handle_previously_room, previously_rooms.keys(), 20 + ) + if initial_rooms: # We also always send down receipts for the current user. user_receipts = ( From 4ac783549c5bac7a490a715d359f330bb0b1a161 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 13 Sep 2024 02:18:19 +0100 Subject: [PATCH 245/332] Sliding Sync: Support filtering by 'tags' / 'not_tags' in SSS (#17662) This appears to be enough to make Element Web work (or at least move it on to the next hurdle) --------- Co-authored-by: Eric Eastwood --- changelog.d/17662.feature | 1 + synapse/handlers/sliding_sync/room_lists.py | 61 +++- .../client/sliding_sync/test_lists_filters.py | 310 +++++++++++++++++- 3 files changed, 369 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17662.feature diff --git a/changelog.d/17662.feature b/changelog.d/17662.feature new file mode 100644 index 0000000000..46d6037a18 --- /dev/null +++ b/changelog.d/17662.feature @@ -0,0 +1 @@ +Add support for the `tags` and `not_tags` filters for simplified sliding sync. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 50f0786374..475bfbbbcb 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -1524,6 +1524,8 @@ class SlidingSyncRoomLists: A filtered dictionary of room IDs along with membership information in the room at the time of `to_token`. 
""" + user_id = user.to_string() + room_id_to_stripped_state_map: Dict[ str, Optional[StateMap[StrippedStateEvent]] ] = {} @@ -1657,9 +1659,36 @@ class SlidingSyncRoomLists: # ) raise NotImplementedError() + # Filter by room tags according to the users account data if filters.tags is not None or filters.not_tags is not None: with start_active_span("filters.tags"): - raise NotImplementedError() + # Fetch the user tags for their rooms + room_tags = await self.store.get_tags_for_user(user_id) + room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id: set(tags.keys()) for room_id, tags in room_tags.items() + } + + if filters.tags is not None: + tags_set = set(filters.tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms that don't have one of the tags in the filter + if room_id_to_tag_name_set.get(room_id, set()).intersection( + tags_set + ) + } + + if filters.not_tags is not None: + not_tags_set = set(filters.not_tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms if they have any of the tags in the filter + if not room_id_to_tag_name_set.get(room_id, set()).intersection( + not_tags_set + ) + } # Assemble a new sync room map but only with the `filtered_room_id_set` return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} @@ -1683,6 +1712,7 @@ class SlidingSyncRoomLists: filters: Filters to apply to_token: We filter based on the state of the room at this token dm_room_ids: Set of room IDs which are DMs + room_tags: Mapping of room ID to tags Returns: A filtered dictionary of room IDs along with membership information in the @@ -1778,9 +1808,36 @@ class SlidingSyncRoomLists: # ) raise NotImplementedError() + # Filter by room tags according to the users account data if filters.tags is not None or filters.not_tags is not None: with start_active_span("filters.tags"): - raise NotImplementedError() + # Fetch the user tags for their rooms + room_tags = await self.store.get_tags_for_user(user_id) + room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id: set(tags.keys()) for room_id, tags in room_tags.items() + } + + if filters.tags is not None: + tags_set = set(filters.tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms that don't have one of the tags in the filter + if room_id_to_tag_name_set.get(room_id, set()).intersection( + tags_set + ) + } + + if filters.not_tags is not None: + not_tags_set = set(filters.not_tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms if they have any of the tags in the filter + if not room_id_to_tag_name_set.get(room_id, set()).intersection( + not_tags_set + ) + } # Assemble a new sync room map but only with the `filtered_room_id_set` return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py index 8df35f5f65..16e4e8edbc 100644 --- a/tests/rest/client/sliding_sync/test_lists_filters.py +++ b/tests/rest/client/sliding_sync/test_lists_filters.py @@ -25,7 +25,7 @@ from synapse.api.constants import ( ) from synapse.api.room_versions import RoomVersions from synapse.events import StrippedStateEvent -from synapse.rest.client import login, room, sync +from synapse.rest.client import login, room, sync, tags from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -60,6 +60,7 @@ class 
SlidingSyncFiltersTestCase(SlidingSyncBase): login.register_servlets, room.register_servlets, sync.register_servlets, + tags.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -1148,6 +1149,27 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase): exact=True, ) + # Just make sure we know what happens when you specify an empty list of room_types + # (we should find nothing) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + def test_filters_not_room_types(self) -> None: """ Test `filters.not_room_types` for different room types @@ -1283,6 +1305,27 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase): exact=True, ) + # Just make sure we know what happens when you specify an empty list of not_room_types + # (we should find all of the rooms) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, foo_room_id, space_room_id}, + exact=True, + ) + def test_filters_room_types_server_left_room(self) -> None: """ Test that we can apply a `filters.room_types` against a room that everyone has left. @@ -1679,3 +1722,268 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase): {space_room_id}, exact=True, ) + + def _add_tag_to_room( + self, *, room_id: str, user_id: str, access_token: str, tag_name: str + ) -> None: + channel = self.make_request( + method="PUT", + path=f"/user/{user_id}/rooms/{room_id}/tags/{tag_name}", + content={}, + access_token=access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + def test_filters_tags(self) -> None: + """ + Test `filters.tags` for rooms with given tags + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with no tags + self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create some rooms with tags + foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Create a room without multiple tags + foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Add the "foo" tag to the foo room + self._add_tag_to_room( + room_id=foo_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + # Add the "bar" tag to the bar room + self._add_tag_to_room( + room_id=bar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + # Add both "foo" and "bar" tags to the foobar room + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + + # Try finding rooms with the "foo" tag + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + 
set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id, foobar_room_id}, + exact=True, + ) + + # Try finding rooms with either "foo" or "bar" tags + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo", "bar"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id, bar_room_id, foobar_room_id}, + exact=True, + ) + + # Try with a random tag we didn't add + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["flomp"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # No rooms should match + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of tags + # (we should find nothing) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + def test_filters_not_tags(self) -> None: + """ + Test `filters.not_tags` for excluding rooms with given tags + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with no tags + untagged_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create some rooms with tags + foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Create a room without multiple tags + foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Add the "foo" tag to the foo room + self._add_tag_to_room( + room_id=foo_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + # Add the "bar" tag to the bar room + self._add_tag_to_room( + room_id=bar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + # Add both "foo" and "bar" tags to the foobar room + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + + # Try finding rooms without the "foo" tag + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id, bar_room_id}, + exact=True, + ) + + # Try finding rooms without either "foo" or "bar" tags + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": ["foo", "bar"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id}, + exact=True, + ) + + # Test how it behaves when we have both `tags` and `not_tags`. + # `not_tags` should win. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo"], + "not_tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # Nothing matches because nothing is both tagged with "foo" and not tagged with "foo" + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of not_tags + # (we should find all of the rooms) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id, foo_room_id, bar_room_id, foobar_room_id}, + exact=True, + ) From 7ed23e072e82611bc8602e6da26031926f98b573 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:32:01 +0100 Subject: [PATCH 246/332] Bump sentry-sdk from 2.13.0 to 2.14.0 (#17712) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4c1756c78a..b552195942 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2325,13 +2325,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.13.0" +version = "2.14.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6"}, - {file = "sentry_sdk-2.13.0.tar.gz", hash = "sha256:8d4a576f7a98eb2fdb40e13106e41f330e5c79d72a68be1316e7852cf4995260"}, + {file = "sentry_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:b8bc3dc51d06590df1291b7519b85c75e2ced4f28d9ea655b6d54033503b5bf4"}, + {file = "sentry_sdk-2.14.0.tar.gz", hash = "sha256:1e0e2eaf6dad918c7d1e0edac868a7bf20017b177f242cefe2a6bcd47955961d"}, ] [package.dependencies] From 7589565eddb910a4659f160da65efcf4b4594927 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:32:39 +0100 Subject: [PATCH 247/332] Bump types-requests from 2.32.0.20240712 to 2.32.0.20240914 (#17713) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b552195942..f70bde4dce 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2809,13 +2809,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20240712" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, - {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] 
From cf982d2e323ade8997fdc7be71122d4557b158b1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:51:33 +0100 Subject: [PATCH 248/332] Bump ruff from 0.6.4 to 0.6.5 (#17715) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index f70bde4dce..c1e0a6d9d3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2268,29 +2268,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.4" +version = "0.6.5" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, - {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, - {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, - {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, - {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, - {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, - {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, + {file = "ruff-0.6.5-py3-none-linux_armv6l.whl", hash = 
"sha256:7e4e308f16e07c95fc7753fc1aaac690a323b2bb9f4ec5e844a97bb7fbebd748"}, + {file = "ruff-0.6.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:932cd69eefe4daf8c7d92bd6689f7e8182571cb934ea720af218929da7bd7d69"}, + {file = "ruff-0.6.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3a8d42d11fff8d3143ff4da41742a98f8f233bf8890e9fe23077826818f8d680"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a50af6e828ee692fb10ff2dfe53f05caecf077f4210fae9677e06a808275754f"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:794ada3400a0d0b89e3015f1a7e01f4c97320ac665b7bc3ade24b50b54cb2972"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:381413ec47f71ce1d1c614f7779d88886f406f1fd53d289c77e4e533dc6ea200"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:52e75a82bbc9b42e63c08d22ad0ac525117e72aee9729a069d7c4f235fc4d276"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09c72a833fd3551135ceddcba5ebdb68ff89225d30758027280968c9acdc7810"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:800c50371bdcb99b3c1551d5691e14d16d6f07063a518770254227f7f6e8c178"}, + {file = "ruff-0.6.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e25ddd9cd63ba1f3bd51c1f09903904a6adf8429df34f17d728a8fa11174253"}, + {file = "ruff-0.6.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7291e64d7129f24d1b0c947ec3ec4c0076e958d1475c61202497c6aced35dd19"}, + {file = "ruff-0.6.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9ad7dfbd138d09d9a7e6931e6a7e797651ce29becd688be8a0d4d5f8177b4b0c"}, + {file = "ruff-0.6.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:005256d977021790cc52aa23d78f06bb5090dc0bfbd42de46d49c201533982ae"}, + {file = "ruff-0.6.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:482c1e6bfeb615eafc5899127b805d28e387bd87db38b2c0c41d271f5e58d8cc"}, + {file = "ruff-0.6.5-py3-none-win32.whl", hash = "sha256:cf4d3fa53644137f6a4a27a2b397381d16454a1566ae5335855c187fbf67e4f5"}, + {file = "ruff-0.6.5-py3-none-win_amd64.whl", hash = "sha256:3e42a57b58e3612051a636bc1ac4e6b838679530235520e8f095f7c44f706ff9"}, + {file = "ruff-0.6.5-py3-none-win_arm64.whl", hash = "sha256:51935067740773afdf97493ba9b8231279e9beef0f2a8079188c4776c25688e0"}, + {file = "ruff-0.6.5.tar.gz", hash = "sha256:4d32d87fab433c0cf285c3683dd4dae63be05fd7a1d65b3f5bf7cdd05a6b96fb"}, ] [[package]] @@ -3104,4 +3104,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "26ff23a6cafd8593141cb3d54d7b1e94328a02b863d347578d2b6e666ee2bc93" +content-hash = "0c833ab57d2082e1ebe2627aef122ce4f93c1abe1f9d8739d5ea3fe52c79fa3f" diff --git a/pyproject.toml b/pyproject.toml index 7675d063b9..7a6080951a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-ruff = "0.6.4" +ruff = "0.6.5" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 4900438712cd1e26e22f5fbb8706f6937f809fe8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:52:10 +0100 Subject: [PATCH 249/332] Bump pyasn1 from 0.6.0 to 0.6.1 (#17714) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index c1e0a6d9d3..fad6dfdaa6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1632,13 +1632,13 @@ psycopg2 = "*" [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] From 285de43e48f9c48ccfda74d391ab32f352ec724e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:52:48 +0100 Subject: [PATCH 250/332] Bump anyhow from 1.0.87 to 1.0.89 (#17716) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1edc21b5f..b438b2cac0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" From 03937a1cae18900350a6d16a2714111a2847c821 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 16 Sep 2024 13:47:35 -0500 Subject: [PATCH 251/332] Sliding Sync: Return room tags in account data extension (#17707) The account data extension was also updated to avoid copies when we pull the data out of the cache. Fix https://github.com/element-hq/synapse/issues/17694 --- changelog.d/17707.feature | 1 + synapse/handlers/sliding_sync/extensions.py | 110 ++++++++++-- .../storage/databases/main/account_data.py | 14 +- synapse/types/handlers/sliding_sync.py | 4 +- .../test_extension_account_data.py | 162 ++++++++++++++---- 5 files changed, 226 insertions(+), 65 deletions(-) create mode 100644 changelog.d/17707.feature diff --git a/changelog.d/17707.feature b/changelog.d/17707.feature new file mode 100644 index 0000000000..98a00ca34b --- /dev/null +++ b/changelog.d/17707.feature @@ -0,0 +1 @@ +Return room tags in Sliding Sync account data extension. 
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 7c2f8a2569..287f4b04ad 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -14,7 +14,19 @@ import itertools import logging -from typing import TYPE_CHECKING, AbstractSet, Dict, Mapping, Optional, Sequence, Set +from typing import ( + TYPE_CHECKING, + AbstractSet, + ChainMap, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Set, + cast, +) from typing_extensions import assert_never @@ -381,29 +393,47 @@ class SlidingSyncExtensionHandler: ) ) + # TODO: This should take into account the `from_token` and `to_token` have_push_rules_changed = await self.store.have_push_rules_changed_for_user( user_id, from_token.stream_token.push_rules_key ) if have_push_rules_changed: - global_account_data_map = dict(global_account_data_map) # TODO: This should take into account the `from_token` and `to_token` global_account_data_map[ AccountDataTypes.PUSH_RULES ] = await self.push_rules_handler.push_rules_for_user(sync_config.user) else: # TODO: This should take into account the `to_token` - all_global_account_data = await self.store.get_global_account_data_for_user( - user_id + immutable_global_account_data_map = ( + await self.store.get_global_account_data_for_user(user_id) ) - global_account_data_map = dict(all_global_account_data) - # TODO: This should take into account the `to_token` - global_account_data_map[ - AccountDataTypes.PUSH_RULES - ] = await self.push_rules_handler.push_rules_for_user(sync_config.user) + # Use a `ChainMap` to avoid copying the immutable data from the cache + global_account_data_map = ChainMap( + { + # TODO: This should take into account the `to_token` + AccountDataTypes.PUSH_RULES: await self.push_rules_handler.push_rules_for_user( + sync_config.user + ) + }, + # Cast is safe because `ChainMap` only mutates the top-most map, + # see https://github.com/python/typeshed/issues/8430 + cast( + MutableMapping[str, JsonMapping], immutable_global_account_data_map + ), + ) # Fetch room account data - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {} + # + # List of -> Mapping from room_id to mapping of `type` to `content` of room + # account data events. + # + # This is is a list so we can avoid making copies of immutable data and instead + # just provide multiple maps that need to be combined. Normally, we could + # reach for `ChainMap` in this scenario, but this is a nested map and accessing + # the ChainMap by room_id won't combine the two maps for that room (we would + # need a new `NestedChainMap` type class). 
+ account_data_by_room_maps: List[Mapping[str, Mapping[str, JsonMapping]]] = [] relevant_room_ids = self.find_relevant_room_ids_for_extension( requested_lists=account_data_request.lists, requested_room_ids=account_data_request.rooms, @@ -418,22 +448,66 @@ class SlidingSyncExtensionHandler: user_id, from_token.stream_token.account_data_key ) ) + + # Add room tags + # + # TODO: This should take into account the `from_token` and `to_token` + tags_by_room = await self.store.get_updated_tags( + user_id, from_token.stream_token.account_data_key + ) + for room_id, tags in tags_by_room.items(): + account_data_by_room_map.setdefault(room_id, {})[ + AccountDataTypes.TAG + ] = {"tags": tags} + + account_data_by_room_maps.append(account_data_by_room_map) else: # TODO: This should take into account the `to_token` - account_data_by_room_map = ( + immutable_account_data_by_room_map = ( await self.store.get_room_account_data_for_user(user_id) ) + account_data_by_room_maps.append(immutable_account_data_by_room_map) - # Filter down to the relevant rooms - account_data_by_room_map = { - room_id: account_data_map - for room_id, account_data_map in account_data_by_room_map.items() - if room_id in relevant_room_ids - } + # Add room tags + # + # TODO: This should take into account the `to_token` + tags_by_room = await self.store.get_tags_for_user(user_id) + account_data_by_room_maps.append( + { + room_id: {AccountDataTypes.TAG: {"tags": tags}} + for room_id, tags in tags_by_room.items() + } + ) + + # Filter down to the relevant rooms ... and combine the maps + relevant_account_data_by_room_map: MutableMapping[ + str, Mapping[str, JsonMapping] + ] = {} + for room_id in relevant_room_ids: + # We want to avoid adding empty maps for relevant rooms that have no room + # account data so do a quick check to see if it's in any of the maps. + is_room_in_maps = False + for room_map in account_data_by_room_maps: + if room_id in room_map: + is_room_in_maps = True + break + + # If we found the room in any of the maps, combine the maps for that room + if is_room_in_maps: + relevant_account_data_by_room_map[room_id] = ChainMap( + {}, + *( + # Cast is safe because `ChainMap` only mutates the top-most map, + # see https://github.com/python/typeshed/issues/8430 + cast(MutableMapping[str, JsonMapping], room_map[room_id]) + for room_map in account_data_by_room_maps + if room_map.get(room_id) + ), + ) return SlidingSyncResult.Extensions.AccountDataExtension( global_account_data_map=global_account_data_map, - account_data_by_room_map=account_data_by_room_map, + account_data_by_room_map=relevant_account_data_by_room_map, ) @trace diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 966393869b..b30639b4e6 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -177,7 +177,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. 
@@ -194,7 +194,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) txn.execute(sql, (user_id,)) - by_room: Dict[str, Dict[str, JsonDict]] = {} + by_room: Dict[str, Dict[str, JsonMapping]] = {} for room_id, account_data_type, content in txn: room_data = by_room.setdefault(room_id, {}) @@ -394,7 +394,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Mapping[str, JsonMapping]: + ) -> Dict[str, JsonMapping]: """Get all the global account_data that's changed for a user. Args: @@ -407,7 +407,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_global_account_data_for_user( txn: LoggingTransaction, - ) -> Dict[str, JsonDict]: + ) -> Dict[str, JsonMapping]: sql = """ SELECT account_data_type, content FROM account_data WHERE user_id = ? AND stream_id > ? @@ -429,7 +429,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_room_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: """Get all the room account_data that's changed for a user. Args: @@ -442,14 +442,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: sql = """ SELECT room_id, account_data_type, content FROM room_account_data WHERE user_id = ? AND stream_id > ? """ txn.execute(sql, (user_id, stream_id)) - account_data_by_room: Dict[str, Dict[str, JsonDict]] = {} + account_data_by_room: Dict[str, Dict[str, JsonMapping]] = {} for row in txn: room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = db_to_json(row[2]) diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 48badeacda..149920f883 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -314,8 +314,8 @@ class SlidingSyncResult: """The Account Data extension (MSC3959) Attributes: - global_account_data_map: Mapping from `type` to `content` of global account - data events. + global_account_data_map: Mapping from `type` to `content` of global + account data events. account_data_by_room_map: Mapping from room_id to mapping of `type` to `content` of room account data events. """ diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py index 6cc883a4be..03b2db39b9 100644 --- a/tests/rest/client/sliding_sync/test_extension_account_data.py +++ b/tests/rest/client/sliding_sync/test_extension_account_data.py @@ -80,18 +80,23 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, tok=user1_tok) + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), # Even though we don't have any global account data set, Synapse saves some # default push rules for us. 
{AccountDataTypes.PUSH_RULES}, exact=True, ) + # Push rules are a giant chunk of JSON data so we will just assume the value is correct if they key is here. + # global_account_data_map[AccountDataTypes.PUSH_RULES] + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -121,16 +126,19 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): # There has been no account data changes since the `from_token` so we shouldn't # see any account data here. + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), set(), exact=True, ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -165,16 +173,24 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # It should show us all of the global account data + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"}, exact=True, ) + # Push rules are a giant chunk of JSON data so we will just assume the value is correct if they key is here. + # global_account_data_map[AccountDataTypes.PUSH_RULES] + self.assertEqual( + global_account_data_map["org.matrix.foobarbaz"], {"foo": "bar"} + ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -220,17 +236,23 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): # Make an incremental Sliding Sync request with the account_data extension enabled response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), # We should only see the new global account data that happened after the `from_token` {"org.matrix.doodardaz"}, exact=True, ) + self.assertEqual( + global_account_data_map["org.matrix.doodardaz"], {"doo": "dar"} + ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -255,6 +277,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) # Create another room with some room account data room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) @@ -266,6 +297,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a 
favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) # Make an initial Sliding Sync request with the account_data extension enabled sync_body = { @@ -294,16 +334,21 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): {room_id1}, exact=True, ) + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + } self.assertIncludes( - { - event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) - }, - {"org.matrix.roorarraz"}, + account_data_map.keys(), + {"org.matrix.roorarraz", AccountDataTypes.TAG}, exact=True, ) + self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"}) + self.assertEqual( + account_data_map[AccountDataTypes.TAG], {"tags": {"m.favourite": {}}} + ) def test_room_account_data_incremental_sync(self) -> None: """ @@ -323,6 +368,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) # Create another room with some room account data room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) @@ -334,6 +388,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) sync_body = { "lists": {}, @@ -369,6 +432,23 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) # Make an incremental Sliding Sync request with the account_data extension enabled response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) @@ -383,16 +463,22 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): exact=True, ) # We should only see the new room account data that happened after the `from_token` + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + } self.assertIncludes( - { - event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) - }, - {"org.matrix.roorarraz2"}, + account_data_map.keys(), + {"org.matrix.roorarraz2", AccountDataTypes.TAG}, exact=True, ) + self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) def test_wait_for_new_data(self) -> None: """ From d10872ee755b133750fa12905fec978fb59e9c25 Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Tue, 17 Sep 2024 14:32:29 +0100 Subject: [PATCH 252/332] 1.115.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 
961169b1ba..26f9326e4c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,10 @@
+# Synapse 1.115.0 (2024-09-17)
+
+No significant changes since 1.115.0rc2.
+
+
+
+
 # Synapse 1.115.0rc2 (2024-09-12)
 
 ### Internal Changes
diff --git a/debian/changelog b/debian/changelog
index fe6ab67414..5628eec8a5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.115.0) stable; urgency=medium
+
+  * New Synapse release 1.115.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 17 Sep 2024 14:32:10 +0100
+
 matrix-synapse-py3 (1.115.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.115.0rc2.
diff --git a/pyproject.toml b/pyproject.toml
index 7675d063b9..355a31599f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.115.0rc2"
+version = "1.115.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

From 8881ad6d4bba70541f465dd9a45e0fd0aefd5cda Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 17 Sep 2024 17:36:59 -0500
Subject: [PATCH 253/332] Sliding Sync: Short-circuit
 `have_finished_sliding_sync_background_jobs` (#17723)

We only need to check it if the returned bump stamp is `None`, which is rare.

Pulling this change out from one of @erikjohnston's branches
(https://github.com/element-hq/synapse/compare/develop...erikj/ss_perf)
---
 changelog.d/17723.misc                    | 1 +
 synapse/handlers/sliding_sync/__init__.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/17723.misc

diff --git a/changelog.d/17723.misc b/changelog.d/17723.misc
new file mode 100644
index 0000000000..1f798b4ccc
--- /dev/null
+++ b/changelog.d/17723.misc
@@ -0,0 +1 @@
+Fetch `bump_stamp`s more efficiently in Sliding Sync.
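The one-line reorder in the diff below works because Python's `and` evaluates left to right and stops at the first falsy operand: with the cheap `latest_room_bump_stamp is None` check first, the awaited database call on the right is skipped in the common case where a bump stamp was found. A self-contained sketch of the effect, with an invented store standing in for the real one:

```python
import asyncio


class FakeStore:
    calls = 0

    async def have_finished_sliding_sync_background_jobs(self) -> bool:
        self.calls += 1
        await asyncio.sleep(0.05)  # stand-in for a database round trip
        return True


async def main() -> None:
    store = FakeStore()
    latest_room_bump_stamp = 42  # almost always set in practice

    # Old order: the DB call runs on every request.
    old = (
        await store.have_finished_sliding_sync_background_jobs()
        and latest_room_bump_stamp is None
    )

    # New order: the falsy left operand short-circuits, so the awaited
    # DB call never happens when a bump stamp exists.
    new = (
        latest_room_bump_stamp is None
        and await store.have_finished_sliding_sync_background_jobs()
    )

    assert old == new  # same result either way
    assert store.calls == 1  # only the old ordering paid for the DB call


asyncio.run(main())
```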
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
index c3b5bbbf6f..4010f28607 100644
--- a/synapse/handlers/sliding_sync/__init__.py
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -1171,8 +1171,8 @@ class SlidingSyncHandler:
             # `SCHEMA_COMPAT_VERSION` and run the foreground update for
             # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots`
             # (tracked by https://github.com/element-hq/synapse/issues/17623)
-            await self.store.have_finished_sliding_sync_background_jobs()
-            and latest_room_bump_stamp is None
+            latest_room_bump_stamp is None
+            and await self.store.have_finished_sliding_sync_background_jobs()
         ):
             return None
 

From 51dd4df0a317330d0679e48d7a6dcd5abb054ec7 Mon Sep 17 00:00:00 2001
From: Shay
Date: Wed, 18 Sep 2024 03:08:01 -0700
Subject: [PATCH 254/332] Add an Admin API endpoint to redact all a user's
 events (#17506)

---
 changelog.d/17506.feature                    |   2 +
 docs/admin_api/user_admin_api.md             |  80 ++++++
 synapse/handlers/admin.py                    | 190 +++++++++++-
 synapse/rest/admin/__init__.py               |   4 +
 synapse/rest/admin/users.py                  |  99 ++++++-
 .../storage/databases/main/events_worker.py  |  70 +++++
 tests/rest/admin/test_user.py                | 272 +++++++++++++++++-
 7 files changed, 712 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/17506.feature

diff --git a/changelog.d/17506.feature b/changelog.d/17506.feature
new file mode 100644
index 0000000000..dc71e43fe3
--- /dev/null
+++ b/changelog.d/17506.feature
@@ -0,0 +1,2 @@
+Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user),
+and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process).
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 2281385830..cb38e26005 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -1361,3 +1361,83 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 ```
 
 _Added in Synapse 1.72.0._
+
+
+## Redact all the events of a user
+
+The API is
+```
+POST /_synapse/admin/v1/user/$user_id/redact
+
+{
+  "rooms": ["!roomid1", "!roomid2"]
+}
+```
+If an empty list is provided as the value for `rooms`, all events in all of the rooms the user is a member of will be redacted;
+otherwise all of the events in the rooms provided in the request will be redacted.
+
+The API starts the redaction process running and returns immediately with a JSON body containing
+a redact ID which can be used to query the status of the redaction process:
+
+```json
+{
+    "redact_id": "<opaque id>"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID of the user: for example, `@user:server.com`.
+
+The following JSON body parameter must be provided:
+
+- `rooms` - A list of rooms to redact the user's events in. If an empty list is provided, all events in all rooms
+  the user is a member of will be redacted.
+
+The following JSON body parameters are optional:
+
+- `reason` - Reason the redaction is being requested, e.g. "spam", "abuse", etc. This will be included in each redaction event and be visible to users.
+- `limit` - the maximum number of the user's events to search through in each room for ones that can be redacted (events are redacted newest to oldest); defaults to 1000 if not provided.
+
+_Added in Synapse 1.116.0._
+
+
+## Check the status of a redaction process
+
+It is possible to query the status of the background task for redacting a user's events.
+The status can be queried up to 24 hours after completion of the task,
+or until Synapse is restarted (whichever happens first).
+
+The API is:
+
+```
+GET /_synapse/admin/v1/user/redact_status/$redact_id
+```
+
+A response body like the following is returned:
+
+```json
+{
+  "status": "active",
+  "failed_redactions": {}
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+* `redact_id` - string - The ID for this redaction process, provided when the redaction was requested.
+
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `status` - string - one of `scheduled`/`active`/`completed`/`failed`, indicating the status of the redaction job
+- `failed_redactions` - dictionary - the keys of the dict are the event IDs the process was unable to redact, if any, and the values are
+  the corresponding errors that caused those redactions to fail
+
+_Added in Synapse 1.116.0._
\ No newline at end of file
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 65b3f153da..58d89080ff 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -21,13 +21,34 @@
 
 import abc
 import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+)
 
 import attr
 
-from synapse.api.constants import Direction, Membership
+from synapse.api.constants import Direction, EventTypes, Membership
+from synapse.api.errors import SynapseError
 from synapse.events import EventBase
-from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo
+from synapse.types import (
+    JsonMapping,
+    Requester,
+    RoomStreamToken,
+    ScheduledTask,
+    StateMap,
+    TaskStatus,
+    UserID,
+    UserInfo,
+    create_requester,
+)
 from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
@@ -35,6 +56,8 @@
 
 logger = logging.getLogger(__name__)
 
+REDACT_ALL_EVENTS_ACTION_NAME = "redact_all_events"
+
 
 class AdminHandler:
     def __init__(self, hs: "HomeServer"):
@@ -43,6 +66,20 @@ class AdminHandler:
         self._storage_controllers = hs.get_storage_controllers()
         self._state_storage_controller = self._storage_controllers.state
         self._msc3866_enabled = hs.config.experimental.msc3866.enabled
+        self.event_creation_handler = hs.get_event_creation_handler()
+        self._task_scheduler = hs.get_task_scheduler()
+
+        self._task_scheduler.register_action(
+            self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME
+        )
+
+    async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]:
+        """Get the current status of an active redaction process
+
+        Args:
+            redact_id: the redact ID returned by `start_redact_events`.
+ """ + return await self._task_scheduler.get_task(redact_id) async def get_whois(self, user: UserID) -> JsonMapping: connections = [] @@ -313,6 +350,153 @@ class AdminHandler: return writer.finished() + async def start_redact_events( + self, + user_id: str, + rooms: list, + requester: JsonMapping, + reason: Optional[str], + limit: Optional[int], + ) -> str: + """ + Start a task redacting the events of the given user in the given rooms + + Args: + user_id: the user ID of the user whose events should be redacted + rooms: the rooms in which to redact the user's events + requester: the user requesting the events + reason: reason for requesting the redaction, ie spam, etc + limit: limit on the number of events in each room to redact + + Returns: + a unique ID which can be used to query the status of the task + """ + active_tasks = await self._task_scheduler.get_tasks( + actions=[REDACT_ALL_EVENTS_ACTION_NAME], + resource_id=user_id, + statuses=[TaskStatus.ACTIVE], + ) + + if len(active_tasks) > 0: + raise SynapseError( + 400, "Redact already in progress for user %s" % (user_id,) + ) + + if not limit: + limit = 1000 + + redact_id = await self._task_scheduler.schedule_task( + REDACT_ALL_EVENTS_ACTION_NAME, + resource_id=user_id, + params={ + "rooms": rooms, + "requester": requester, + "user_id": user_id, + "reason": reason, + "limit": limit, + }, + ) + + logger.info( + "starting redact events with redact_id %s", + redact_id, + ) + + return redact_id + + async def _redact_all_events( + self, task: ScheduledTask + ) -> Tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]: + """ + Task to redact all of a users events in the given rooms, tracking which, if any, events + whose redaction failed + """ + + assert task.params is not None + rooms = task.params.get("rooms") + assert rooms is not None + + r = task.params.get("requester") + assert r is not None + admin = Requester.deserialize(self._store, r) + + user_id = task.params.get("user_id") + assert user_id is not None + + requester = create_requester( + user_id, authenticated_entity=admin.user.to_string() + ) + + reason = task.params.get("reason") + limit = task.params.get("limit") + assert limit is not None + + result: Mapping[str, Any] = ( + task.result if task.result else {"failed_redactions": {}} + ) + for room in rooms: + room_version = await self._store.get_room_version(room) + event_ids = await self._store.get_events_sent_by_user_in_room( + user_id, + room, + limit, + ["m.room.member", "m.room.message"], + ) + if not event_ids: + # there's nothing to redact + return TaskStatus.COMPLETE, result, None + + events = await self._store.get_events_as_list(event_ids) + for event in events: + # we care about join events but not other membership events + if event.type == "m.room.member": + content = event.content + if content: + if content.get("membership") == Membership.JOIN: + pass + else: + continue + relations = await self._store.get_relations_for_event( + room, event.event_id, event, event_type=EventTypes.Redaction + ) + + # if we've already successfully redacted this event then skip processing it + if relations[0]: + continue + + event_dict = { + "type": EventTypes.Redaction, + "content": {"reason": reason} if reason else {}, + "room_id": room, + "sender": user_id, + } + if room_version.updated_redaction_rules: + event_dict["content"]["redacts"] = event.event_id + else: + event_dict["redacts"] = event.event_id + + try: + # set the prev event to the offending message to allow for redactions + # to be processed in the case where the user 
+                    # redactions are requested
+                    (
+                        redaction,
+                        _,
+                    ) = await self.event_creation_handler.create_and_send_nonmember_event(
+                        requester,
+                        event_dict,
+                        prev_event_ids=[event.event_id],
+                        ratelimit=False,
+                    )
+                except Exception as ex:
+                    logger.info(
+                        "Redaction of event %s failed due to: %s",
+                        event.event_id,
+                        ex,
+                    )
+                    result["failed_redactions"][event.event_id] = str(ex)
+                    await self._task_scheduler.update_task(task.id, result=result)
+
+        return TaskStatus.COMPLETE, result, None
+
 
 class ExfiltrationWriter(metaclass=abc.ABCMeta):
     """Interface used to specify how to write exported data."""
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index cdaee17451..4db8975674 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -98,6 +98,8 @@ from synapse.rest.admin.users import (
     DeactivateAccountRestServlet,
     PushersRestServlet,
     RateLimitRestServlet,
+    RedactUser,
+    RedactUserStatus,
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     ShadowBanRestServlet,
@@ -319,6 +321,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server)
     UserByExternalId(hs).register(http_server)
     UserByThreePid(hs).register(http_server)
+    RedactUser(hs).register(http_server)
+    RedactUserStatus(hs).register(http_server)
 
     DeviceRestServlet(hs).register(http_server)
     DevicesRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 076994b87e..81dfb57a95 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -50,7 +50,7 @@ from synapse.rest.admin._base import (
 from synapse.rest.client._base import client_patterns
 from synapse.storage.databases.main.registration import ExternalIDReuseException
 from synapse.storage.databases.main.stats import UserSortOrder
-from synapse.types import JsonDict, JsonMapping, UserID
+from synapse.types import JsonDict, JsonMapping, TaskStatus, UserID
 from synapse.types.rest import RequestBodyModel
 
 if TYPE_CHECKING:
@@ -1405,3 +1405,100 @@
             raise NotFoundError("User not found")
 
         return HTTPStatus.OK, {"user_id": user_id}
+
+
+class RedactUser(RestServlet):
+    """
+    Redact all the events of a given user in the given rooms, or, if an empty list
+    of rooms is provided, all events in all rooms the user is a member of. Kicks off
+    a background process and returns an ID that can be used to check on the progress
+    of the redaction
+    """
+
+    PATTERNS = admin_patterns("/user/(?P<user_id>[^/]*)/redact")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastores().main
+        self.admin_handler = hs.get_admin_handler()
+
+    async def on_POST(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester)
+
+        body = parse_json_object_from_request(request, allow_empty_body=True)
+        rooms = body.get("rooms")
+        if rooms is None:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST, "Must provide a value for rooms."
+            )
+
+        reason = body.get("reason")
+        if reason:
+            if not isinstance(reason, str):
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "If a reason is provided it must be a string.",
+                )
+
+        limit = body.get("limit")
+        if limit:
+            if not isinstance(limit, int) or limit <= 0:
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "If limit is provided it must be a positive integer.",
+                )
+
+        if not rooms:
+            rooms = await self._store.get_rooms_for_user(user_id)
+
+        redact_id = await self.admin_handler.start_redact_events(
+            user_id, list(rooms), requester.serialize(), reason, limit
+        )
+
+        return HTTPStatus.OK, {"redact_id": redact_id}
+
+
+class RedactUserStatus(RestServlet):
+    """
+    Check on the progress of the redaction request represented by the provided ID,
+    returning the status of the process and a dict of events that could not be
+    redacted, if any
+    """
+
+    PATTERNS = admin_patterns("/user/redact_status/(?P<redact_id>[^/]*)$")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self.admin_handler = hs.get_admin_handler()
+
+    async def on_GET(
+        self, request: SynapseRequest, redact_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        task = await self.admin_handler.get_redact_task(redact_id)
+
+        if task:
+            if task.status == TaskStatus.ACTIVE:
+                return HTTPStatus.OK, {"status": TaskStatus.ACTIVE}
+            elif task.status == TaskStatus.COMPLETE:
+                assert task.result is not None
+                failed_redactions = task.result.get("failed_redactions")
+                return HTTPStatus.OK, {
+                    "status": TaskStatus.COMPLETE,
+                    "failed_redactions": failed_redactions if failed_redactions else {},
+                }
+            elif task.status == TaskStatus.SCHEDULED:
+                return HTTPStatus.OK, {"status": TaskStatus.SCHEDULED}
+            else:
+                return HTTPStatus.OK, {
+                    "status": TaskStatus.FAILED,
+                    "error": (
+                        task.error
+                        if task.error
+                        else "Unknown error, please check the logs for more information."
+                    ),
+                }
+        else:
+            raise NotFoundError("redact id '%s' not found" % redact_id)
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 029f4bd87d..c029228422 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -2467,6 +2467,76 @@
 
         self.invalidate_get_event_cache_after_txn(txn, event_id)
 
+    async def get_events_sent_by_user_in_room(
+        self,
+        user_id: str,
+        room_id: str,
+        limit: int,
+        filter: Optional[List[str]] = None,
+    ) -> Optional[List[str]]:
+        """
+        Get a list of event IDs of events sent by the user in the specified room
+
+        Args:
+            user_id: user ID to search against
+            room_id: room ID of the room to search for events in
+            limit: maximum number of event IDs to return
+            filter: if provided, only return events of these types
+        """
+
+        def _get_events_by_user_in_room_txn(
+            txn: LoggingTransaction,
+            user_id: str,
+            room_id: str,
+            filter: Optional[List[str]],
+            batch_size: int,
+            offset: int,
+        ) -> Tuple[Optional[List[str]], int]:
+            if filter:
+                base_clause, args = make_in_list_sql_clause(
+                    txn.database_engine, "type", filter
+                )
+                clause = f"AND {base_clause}"
+                parameters = (user_id, room_id, *args, batch_size, offset)
+            else:
+                clause = ""
+                parameters = (user_id, room_id, batch_size, offset)
+
+            sql = f"""
+                    SELECT event_id FROM events
+                    WHERE sender = ? AND room_id = ?
+                    {clause}
+                    ORDER BY received_ts DESC
+                    LIMIT ?
+                    OFFSET ?
+ """ + txn.execute(sql, parameters) + res = txn.fetchall() + if res: + events = [row[0] for row in res] + else: + events = None + + return events, offset + batch_size + + offset = 0 + batch_size = 100 + if batch_size > limit: + batch_size = limit + + selected_ids: List[str] = [] + while offset < limit: + res, offset = await self.db_pool.runInteraction( + "get_events_by_user", + _get_events_by_user_in_room_txn, + user_id, + room_id, + filter, + batch_size, + offset, + ) + if res: + selected_ids = selected_ids + res + else: + break + return selected_ids + async def have_finished_sliding_sync_background_jobs(self) -> bool: """Return if it's safe to use the sliding sync membership tables.""" diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 16bb4349f5..ef918efe49 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -21,9 +21,11 @@ import hashlib import hmac +import json import os import urllib.parse from binascii import unhexlify +from http import HTTPStatus from typing import Dict, List, Optional from unittest.mock import AsyncMock, Mock, patch @@ -33,7 +35,7 @@ from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource import synapse.rest.admin -from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes +from synapse.api.constants import ApprovalNoticeMedium, EventTypes, LoginType, UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions from synapse.media.filepath import MediaFilePaths @@ -5089,3 +5091,271 @@ class UserSuspensionTestCase(unittest.HomeserverTestCase): res5 = self.get_success(self.store.get_user_suspended_status(self.bad_user)) self.assertEqual(True, res5) + + +class UserRedactionTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + self.store = hs.get_datastores().main + + self.spam_checker = hs.get_module_api_callbacks().spam_checker + + # create rooms - room versions 11+ store the `redacts` key in content while + # earlier ones don't so we use a mix of room versions + self.rm1 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="7" + ) + self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.rm3 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="11" + ) + + def test_redact_messages_all_rooms(self) -> None: + """ + Test that request to redact events in all rooms user is member of is successful + """ + + # join rooms, send some messages + originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + originals.append(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200 + ) + originals.append(res["event_id"]) + + # redact all events in all rooms + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": 
[]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + matched = [] + for rm in [self.rm1, self.rm2, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + for event_id in originals: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + matched.append(event_id) + self.assertEqual(len(matched), len(originals)) + + def test_redact_messages_specific_rooms(self) -> None: + """ + Test that request to redact events in specified rooms user is member of is successful + """ + + originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + originals.append(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok + ) + originals.append(res["event_id"]) + + # redact messages in rooms 1 and 3 + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm1, self.rm3]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + # messages in requested rooms are redacted + for rm in [self.rm1, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + matches = [] + for event in channel.json_body["chunk"]: + for event_id in originals: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + matches.append((event_id, event)) + # we redacted 16 messages + self.assertEqual(len(matches), 16) + + channel = self.make_request( + "GET", f"rooms/{self.rm2}/messages?limit=50", access_token=self.admin_tok + ) + self.assertEqual(channel.code, 200) + + # messages in remaining room are not + for event in channel.json_body["chunk"]: + if event["type"] == "m.room.redaction": + self.fail("found redaction in room 2") + + def test_redact_status(self) -> None: + rm2_originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + if rm == self.rm2: + rm2_originals.append(join["event_id"]) + for i in range(5): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok + ) + if rm == self.rm2: + rm2_originals.append(res["event_id"]) + + # redact messages in rooms 1 and 3 + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm1, self.rm3]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel.json_body.get("redact_id") + + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + self.assertEqual(channel2.code, 200) + self.assertEqual(channel2.json_body.get("status"), "complete") + self.assertEqual(channel2.json_body.get("failed_redactions"), {}) + + # mock that will cause persisting the redaction events to fail + async def check_event_for_spam(event: str) -> str: + return "spam" + + self.spam_checker.check_event_for_spam = check_event_for_spam # type: ignore + + channel3 = 
self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm2]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel3.code, 200)
+        id = channel3.json_body.get("redact_id")
+
+        channel4 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel4.code, 200)
+        self.assertEqual(channel4.json_body.get("status"), "complete")
+        failed_redactions = channel4.json_body.get("failed_redactions")
+        assert failed_redactions is not None
+        matched = []
+        for original in rm2_originals:
+            if failed_redactions.get(original) is not None:
+                matched.append(original)
+        self.assertEqual(len(matched), len(rm2_originals))
+
+    def test_admin_redact_works_if_user_kicked_or_banned(self) -> None:
+        originals = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
+            originals.append(join["event_id"])
+            for i in range(5):
+                event = {"body": f"hello{i}", "msgtype": "m.text"}
+                res = self.helper.send_event(
+                    rm, "m.room.message", event, tok=self.bad_user_tok
+                )
+                originals.append(res["event_id"])
+
+        # kick user from rooms 1 and 3
+        for r in [self.rm1, self.rm3]:
+            channel = self.make_request(
+                "POST",
+                f"/_matrix/client/r0/rooms/{r}/kick",
+                content={"reason": "being a bummer", "user_id": self.bad_user},
+                access_token=self.admin_tok,
+            )
+            self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+        # redact messages in rooms 1 and 3
+        channel1 = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm1, self.rm3]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel1.code, 200)
+        id = channel1.json_body.get("redact_id")
+
+        # check that there were no failed redactions in rooms 1 and 3
+        channel2 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel2.code, 200)
+        self.assertEqual(channel2.json_body.get("status"), "complete")
+        failed_redactions = channel2.json_body.get("failed_redactions")
+        self.assertEqual(failed_redactions, {})
+
+        # ban user
+        channel3 = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{self.rm2}/ban",
+            content={"reason": "being a bummer", "user_id": self.bad_user},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel3.code, HTTPStatus.OK, channel3.result)
+
+        # redact messages in room 2
+        channel4 = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm2]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel4.code, 200)
+        id2 = channel4.json_body.get("redact_id")
+
+        # check that there were no failed redactions in room 2
+        channel5 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id2}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel5.code, 200)
+        self.assertEqual(channel5.json_body.get("status"), "complete")
+        failed_redactions = channel5.json_body.get("failed_redactions")
+        self.assertEqual(failed_redactions, {})

From 3c8a116e1a36a911aa92965836444f4473044f66 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <7190048+kegsay@users.noreply.github.com>
Date: Wed, 18 Sep 2024 17:25:50 +0100
Subject: [PATCH 255/332] Sliding Sync: bugfix: ensure we can sync with SSS
 even with missing rooms (#17727)

Fixes https://github.com/element-hq/element-x-ios/issues/3300

Some rooms are missing from `sliding_sync_joined_rooms`.
When this happens, the first call will succeed, but any subsequent calls for
this room ID will cause the cache to return `None`, rather than not having
the key at all. This then causes the `<=` check to throw.

Root cause: https://github.com/element-hq/synapse/issues/17726
---
 changelog.d/17727.bugfix                 | 1 +
 synapse/storage/databases/main/stream.py | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/17727.bugfix

diff --git a/changelog.d/17727.bugfix b/changelog.d/17727.bugfix
new file mode 100644
index 0000000000..64c6e90d87
--- /dev/null
+++ b/changelog.d/17727.bugfix
@@ -0,0 +1 @@
+Fix a bug in SSS which could prevent /sync from working for certain user accounts.
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 94a7efee73..03b4aa3381 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -1469,6 +1469,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         recheck_rooms: Set[str] = set()
         min_token = end_token.stream
         for room_id, stream in uncapped_results.items():
+            if stream is None:
+                # Despite the function not directly setting None, the cache can!
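+                # (`_bulk_get_max_event_pos` is a cached method; a room missing
+                # from `sliding_sync_joined_rooms` can end up with `None` cached
+                # as its max stream position.)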
+ # See: https://github.com/element-hq/synapse/issues/17726 + continue if stream <= min_token: results[room_id] = stream else: @@ -1495,7 +1499,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): @cachedList(cached_method_name="_get_max_event_pos", list_name="room_ids") async def _bulk_get_max_event_pos( self, room_ids: StrCollection - ) -> Mapping[str, int]: + ) -> Mapping[str, Optional[int]]: """Fetch the max position of a persisted event in the room.""" # We need to be careful not to return positions ahead of the current From 61b7c31772034fe63b311bd63d7c3d7e24551cdf Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 18 Sep 2024 13:12:14 -0500 Subject: [PATCH 256/332] Sliding Sync: Shortcut for checking if certain background updates have completed (#17724) Shortcut for checking if certain background updates have completed Pulling this change out from one of @erikjohnston's branches (https://github.com/element-hq/synapse/compare/develop...erikj/ss_perf) --------- Co-authored-by: Erik Johnston --- changelog.d/17724.misc | 1 + synapse/storage/background_updates.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/17724.misc diff --git a/changelog.d/17724.misc b/changelog.d/17724.misc new file mode 100644 index 0000000000..630443f179 --- /dev/null +++ b/changelog.d/17724.misc @@ -0,0 +1 @@ +Shortcut for checking if certain background updates have completed (utilized in Sliding Sync). diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 1194b58ffb..34139f580d 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -490,6 +490,12 @@ class BackgroundUpdater: if self._all_done: return True + # We now check if we have completed all pending background updates. We + # do this as once this returns True then it will set `self._all_done` + # and we can skip checking the database in future. + if await self.has_completed_background_updates(): + return True + rows = await self.db_pool.simple_select_many_batch( table="background_updates", column="update_name", From af998e6c660986da8385d26d724c70b5d0f77c67 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 18 Sep 2024 18:09:23 -0500 Subject: [PATCH 257/332] Sliding sync: Ignore invites from ignored users (#17729) `m.ignored_user_list` in account data --- changelog.d/17729.bugfix | 1 + synapse/handlers/sliding_sync/room_lists.py | 30 ++++- .../client/sliding_sync/test_sliding_sync.py | 113 +++++++++++++++++- 3 files changed, 142 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17729.bugfix diff --git a/changelog.d/17729.bugfix b/changelog.d/17729.bugfix new file mode 100644 index 0000000000..4ba4e551c6 --- /dev/null +++ b/changelog.d/17729.bugfix @@ -0,0 +1 @@ +Ignore invites from ignored users in Sliding Sync. 
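The handler change below boils down to one rule: invite memberships whose sender
appears in the user's `m.ignored_user_list` account data are dropped from the
room list. A rough sketch of that rule (names and types here are illustrative;
the real code filters Synapse's `RoomsForUserSlidingSync` entries):

```python
from typing import Dict, Set, Tuple

# Sketch only: drop invite memberships whose sender is ignored. `memberships`
# maps room_id -> (membership, sender); `ignored_users` holds the user IDs
# from the `m.ignored_user_list` account data event.
def drop_ignored_invites(
    memberships: Dict[str, Tuple[str, str]], ignored_users: Set[str]
) -> Dict[str, Tuple[str, str]]:
    return {
        room_id: (membership, sender)
        for room_id, (membership, sender) in memberships.items()
        # Only invites are dropped; the user's own joins/leaves are kept.
        if not (membership == "invite" and sender in ignored_users)
    }
```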
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 475bfbbbcb..8457526a45 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -224,15 +224,31 @@ class SlidingSyncRoomLists: user_id ) + # Remove invites from ignored users + ignored_users = await self.store.ignored_users(user_id) + if ignored_users: + # TODO: It would be nice to avoid these copies + room_membership_for_user_map = dict(room_membership_for_user_map) + # Make a copy so we don't run into an error: `dictionary changed size during + # iteration`, when we remove items + for room_id in list(room_membership_for_user_map.keys()): + room_for_user_sliding_sync = room_membership_for_user_map[room_id] + if ( + room_for_user_sliding_sync.membership == Membership.INVITE + and room_for_user_sliding_sync.sender in ignored_users + ): + room_membership_for_user_map.pop(room_id, None) + changes = await self._get_rewind_changes_to_current_membership_to_token( sync_config.user, room_membership_for_user_map, to_token=to_token ) if changes: + # TODO: It would be nice to avoid these copies room_membership_for_user_map = dict(room_membership_for_user_map) for room_id, change in changes.items(): if change is None: # Remove rooms that the user joined after the `to_token` - room_membership_for_user_map.pop(room_id) + room_membership_for_user_map.pop(room_id, None) continue existing_room = room_membership_for_user_map.get(room_id) @@ -926,6 +942,18 @@ class SlidingSyncRoomLists: excluded_rooms=self.rooms_to_exclude_globally, ) + # Remove invites from ignored users + ignored_users = await self.store.ignored_users(user_id) + if ignored_users: + room_for_user_list = [ + room_for_user + for room_for_user in room_for_user_list + if not ( + room_for_user.membership == Membership.INVITE + and room_for_user.sender in ignored_users + ) + ] + # If the user has never joined any rooms before, we can just return an empty list if not room_for_user_list: return {}, set(), set() diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index fe35cbb532..1126258c43 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -29,7 +29,7 @@ from synapse.api.constants import ( from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.rest.client import devices, login, receipts, room, sync +from synapse.rest.client import account_data, devices, login, receipts, room, sync from synapse.server import HomeServer from synapse.types import ( JsonDict, @@ -413,6 +413,7 @@ class SlidingSyncTestCase(SlidingSyncBase): sync.register_servlets, devices.register_servlets, receipts.register_servlets, + account_data.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -670,6 +671,116 @@ class SlidingSyncTestCase(SlidingSyncBase): exact=True, ) + def test_ignored_user_invites_initial_sync(self) -> None: + """ + Make sure we ignore invites if they are from one of the `m.ignored_user_list` on + initial sync. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room that user1 is already in + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a room that user2 is already in + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to room_id2 + self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + + # Sync once before we ignore to make sure the rooms can show up + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # room_id2 shows up because we haven't ignored the user yet + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1, room_id2}, + exact=True, + ) + + # User1 ignores user2 + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}", + content={"ignored_users": {user2_id: {}}}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Sync again (initial sync) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # The invite for room_id2 should no longer show up because user2 is ignored + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + ) + + def test_ignored_user_invites_incremental_sync(self) -> None: + """ + Make sure we ignore invites if they are from one of the `m.ignored_user_list` on + incremental sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room that user1 is already in + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a room that user2 is already in + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 ignores user2 + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}", + content={"ignored_users": {user2_id: {}}}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Initial sync + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # User1 only has membership in room_id1 at this point + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + ) + + # User1 is invited to room_id2 after the initial sync + self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + + # Sync again (incremental sync) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + # The invite for room_id2 doesn't show up because user2 is ignored + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + ) + def test_sort_list(self) -> None: """ Test that the `lists` are sorted by `stream_ordering` From faf5b40520345d5e772ddfc71cb3f814a6509a17 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 19 Sep 2024 03:32:16 -0500 Subject: [PATCH 258/332] Sliding Sync: Fix `_bulk_get_max_event_pos(...)` being 
inefficient (#17728) Fix `_bulk_get_max_event_pos(...)` being inefficient. It kept adding all of the `batch_results` to the `results` over and over every time we checked a single room in the batch. I think we still ended up with the right answer before because we accumulate `recheck_rooms` and actually recheck them to overwrite the bad data we wrote to the `results` before. Introduced in https://github.com/element-hq/synapse/pull/17606/files#diff-cbd54e4b5a2a1646299d659a2d5884d6cb14e608efd2e1658e72b465bb66e31bR1481 --- changelog.d/17728.misc | 1 + synapse/storage/databases/main/stream.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17728.misc diff --git a/changelog.d/17728.misc b/changelog.d/17728.misc new file mode 100644 index 0000000000..5ab241e9df --- /dev/null +++ b/changelog.d/17728.misc @@ -0,0 +1 @@ +Fix `_bulk_get_max_event_pos` being inefficient. diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 03b4aa3381..0ab7cb8dbd 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1584,7 +1584,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) for room_id, stream_ordering in batch_results.items(): if stream_ordering <= now_token.stream: - results.update(batch_results) + results[room_id] = stream_ordering else: recheck_rooms.add(room_id) From a9c0e27eb760c0d042e84e6f7beda167611ef148 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 19 Sep 2024 03:33:34 -0500 Subject: [PATCH 259/332] Sliding Sync: No need to sort if the range is large enough to cover all of the rooms (#17731) No need to sort if the range is large enough to cover all of the rooms in the list. Previously, we would only do this optimization if the range was exactly large enough. Follow-up to https://github.com/element-hq/synapse/pull/17672 --- changelog.d/17731.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 14 +++- .../client/sliding_sync/test_lists_filters.py | 66 ++++++++----------- .../client/sliding_sync/test_rooms_meta.py | 3 +- .../client/sliding_sync/test_sliding_sync.py | 61 ++++++++++++----- 5 files changed, 87 insertions(+), 58 deletions(-) create mode 100644 changelog.d/17731.misc diff --git a/changelog.d/17731.misc b/changelog.d/17731.misc new file mode 100644 index 0000000000..d5df74b4c9 --- /dev/null +++ b/changelog.d/17731.misc @@ -0,0 +1 @@ +Small performance improvement in speeding up Sliding Sync. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 8457526a45..a423de74bf 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -373,8 +373,18 @@ class SlidingSyncRoomLists: ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] if list_config.ranges: - if list_config.ranges == [(0, len(filtered_sync_room_map) - 1)]: - # If we are asking for the full range, we don't need to sort the list. + # Optimization: If we are asking for the full range, we don't + # need to sort the list. + if ( + # We're looking for a single range that covers the entire list + len(list_config.ranges) == 1 + # Range starts at 0 + and list_config.ranges[0][0] == 0 + # And the range extends to the end of the list or more. Each + # side is inclusive. 
+ and list_config.ranges[0][1] + >= len(filtered_sync_room_map) - 1 + ): sorted_room_info: List[RoomsForUserType] = list( filtered_sync_room_map.values() ) diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py index 16e4e8edbc..c59f6aedc4 100644 --- a/tests/rest/client/sliding_sync/test_lists_filters.py +++ b/tests/rest/client/sliding_sync/test_lists_filters.py @@ -230,32 +230,21 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase): response_body, from_token = self.do_sync(sync_body, tok=user1_tok) # Make sure the response has the lists we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["all-list", "foo-list"], + self.assertIncludes( response_body["lists"].keys(), + {"all-list", "foo-list"}, ) # Make sure the lists have the correct rooms - self.assertListEqual( - list(response_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], + self.assertIncludes( + set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]), + {space_room_id, room_id}, + exact=True, ) - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, ) # Everyone leaves the encrypted space room @@ -284,26 +273,23 @@ class SlidingSyncFiltersTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - # Make sure the lists have the correct rooms even though we `newly_left` - self.assertListEqual( - list(response_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], + # Make sure the response has the lists we requested + self.assertIncludes( + response_body["lists"].keys(), + {"all-list", "foo-list"}, + exact=True, ) - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], + + # Make sure the lists have the correct rooms even though we `newly_left` + self.assertIncludes( + set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]), + {space_room_id, room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, ) def test_filters_is_dm(self) -> None: diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 40743d17eb..8d6039715f 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -935,7 +935,8 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): op = response_body["lists"]["foo-list"]["ops"][0] self.assertEqual(op["op"], "SYNC") self.assertEqual(op["range"], [0, 1]) - # Note that we don't order the ops anymore, so we need to compare sets. 
+ # Note that we don't sort the rooms when the range includes all of the rooms, so + # we just assert that the rooms are included self.assertIncludes(set(op["room_ids"]), {room_id1, room_id2}, exact=True) # The `bump_stamp` for room1 should point at the latest message (not the diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index 1126258c43..c2cfb29866 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -797,7 +797,38 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity in room1", tok=user1_tok) self.helper.send(room_id2, "activity in room2", tok=user1_tok) - # Make the Sliding Sync request + # Make the Sliding Sync request where the range includes *some* of the rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 1, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the foo-list we requested + self.assertIncludes( + response_body["lists"].keys(), + {"foo-list"}, + ) + # Make sure the list is sorted in the way we expect (we only sort when the range + # doesn't include all of the room) + self.assertListEqual( + list(response_body["lists"]["foo-list"]["ops"]), + [ + { + "op": "SYNC", + "range": [0, 1], + "room_ids": [room_id2, room_id1], + } + ], + response_body["lists"]["foo-list"], + ) + + # Make the Sliding Sync request where the range includes *all* of the rooms sync_body = { "lists": { "foo-list": { @@ -810,24 +841,24 @@ class SlidingSyncTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], + self.assertIncludes( response_body["lists"].keys(), + {"foo-list"}, ) - - # Make sure the list is sorted in the way we expect - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [room_id2, room_id1, room_id3], - } - ], + # Since the range includes all of the rooms, we don't sort the list + self.assertEqual( + len(response_body["lists"]["foo-list"]["ops"]), + 1, response_body["lists"]["foo-list"], ) + op = response_body["lists"]["foo-list"]["ops"][0] + self.assertEqual(op["op"], "SYNC") + self.assertEqual(op["range"], [0, 99]) + # Note that we don't sort the rooms when the range includes all of the rooms, so + # we just assert that the rooms are included + self.assertIncludes( + set(op["room_ids"]), {room_id1, room_id2, room_id3}, exact=True + ) def test_sliced_windows(self) -> None: """ From 83fc225030253fc222af62fc96c9f4db8d4738e8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 19 Sep 2024 06:43:26 -0500 Subject: [PATCH 260/332] Sliding Sync: Add cache to `get_tags_for_room(...)` (#17730) Add cache to `get_tags_for_room(...)` This helps Sliding Sync because `get_tags_for_room(...)` is going to be used in https://github.com/element-hq/synapse/pull/17695 Essentially, we're just trying to match `get_account_data_for_room(...)` which already has a tree cache. 
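The `tree=True` option is what lets the store invalidate either a single
`(user_id, room_id)` entry or every entry under a `user_id` prefix. A toy model
of that behaviour (illustrative only, not Synapse's actual cache implementation):

```python
from typing import Dict, Optional, Tuple

# Toy model of a tree cache keyed on (user_id, room_id): invalidate() accepts
# either a full key or a (user_id,) prefix, mirroring the
# `get_tags_for_room.invalidate(...)` calls in the patch below.
class ToyTreeCache:
    def __init__(self) -> None:
        self._entries: Dict[Tuple[str, ...], dict] = {}

    def set(self, user_id: str, room_id: str, tags: dict) -> None:
        self._entries[(user_id, room_id)] = tags

    def get(self, user_id: str, room_id: str) -> Optional[dict]:
        return self._entries.get((user_id, room_id))

    def invalidate(self, key: Tuple[str, ...]) -> None:
        # A full key drops one entry; a shorter prefix drops every entry under
        # it, e.g. invalidate((user_id,)) clears all rooms for that user.
        self._entries = {
            k: v for k, v in self._entries.items() if k[: len(key)] != key
        }
```

That prefix behaviour is why the stream handler below can fall back to
`invalidate((row.user_id,))` when an account data row arrives without a `room_id`.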
--- changelog.d/17730.misc | 1 + synapse/handlers/account_data.py | 4 ++-- synapse/storage/databases/main/cache.py | 1 + synapse/storage/databases/main/tags.py | 19 ++++++++++++++++--- .../test_resource_limits_server_notices.py | 2 +- 5 files changed, 21 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17730.misc diff --git a/changelog.d/17730.misc b/changelog.d/17730.misc new file mode 100644 index 0000000000..56da7bfd1a --- /dev/null +++ b/changelog.d/17730.misc @@ -0,0 +1 @@ +Add cache to `get_tags_for_room(...)`. diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 97a463d8d0..228132db48 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -33,7 +33,7 @@ from synapse.replication.http.account_data import ( ReplicationRemoveUserAccountDataRestServlet, ) from synapse.streams import EventSource -from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID +from synapse.types import JsonDict, JsonMapping, StrCollection, StreamKeyType, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -253,7 +253,7 @@ class AccountDataHandler: return response["max_stream_id"] async def add_tag_to_room( - self, user_id: str, room_id: str, tag: str, content: JsonDict + self, user_id: str, room_id: str, tag: str, content: JsonMapping ) -> int: """Add a tag to a room for a user. diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 37c865a8e7..32c3472e58 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -471,6 +471,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_account_data_for_room", None) self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None) + self._attempt_to_invalidate_cache("get_tags_for_room", None) self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,)) self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None) diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index b5af294384..b498cb9625 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -158,9 +158,10 @@ class TagsWorkerStore(AccountDataWorkerStore): return results + @cached(num_args=2, tree=True) async def get_tags_for_room( self, user_id: str, room_id: str - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get all the tags for the given room Args: @@ -182,7 +183,7 @@ class TagsWorkerStore(AccountDataWorkerStore): return {tag: db_to_json(content) for tag, content in rows} async def add_tag_to_room( - self, user_id: str, room_id: str, tag: str, content: JsonDict + self, user_id: str, room_id: str, tag: str, content: JsonMapping ) -> int: """Add a tag to a room for a user. 
@@ -213,6 +214,7 @@ class TagsWorkerStore(AccountDataWorkerStore): await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) + self.get_tags_for_room.invalidate((user_id, room_id)) return self._account_data_id_gen.get_current_token() @@ -237,6 +239,7 @@ class TagsWorkerStore(AccountDataWorkerStore): await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) + self.get_tags_for_room.invalidate((user_id, room_id)) return self._account_data_id_gen.get_current_token() @@ -290,9 +293,19 @@ class TagsWorkerStore(AccountDataWorkerStore): rows: Iterable[Any], ) -> None: if stream_name == AccountDataStream.NAME: - for row in rows: + # Cast is safe because the `AccountDataStream` should only be giving us + # `AccountDataStreamRow` + account_data_stream_rows: List[AccountDataStream.AccountDataStreamRow] = ( + cast(List[AccountDataStream.AccountDataStreamRow], rows) + ) + + for row in account_data_stream_rows: if row.data_type == AccountDataTypes.TAG: self.get_tags_for_user.invalidate((row.user_id,)) + if row.room_id: + self.get_tags_for_room.invalidate((row.user_id, row.room_id)) + else: + self.get_tags_for_room.invalidate((row.user_id,)) self._account_data_stream_cache.entity_has_changed( row.user_id, token ) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 0e3e4f7293..997ee7b91b 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -89,7 +89,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): return_value="!something:localhost" ) self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None) # type: ignore[method-assign] - self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) # type: ignore[method-assign] + self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self) -> None: From 07a51d2a564a83b8e285ab2990d2b466ab327171 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Sep 2024 16:01:11 +0300 Subject: [PATCH 261/332] Fix sliding sync for rooms with unknown room version (#17733) Follow on from #17727 --- changelog.d/17733.bugfix | 1 + synapse/handlers/sliding_sync/room_lists.py | 10 ++++ synapse/storage/databases/main/roommember.py | 5 ++ .../client/sliding_sync/test_rooms_meta.py | 52 +++++++++++++++++++ 4 files changed, 68 insertions(+) create mode 100644 changelog.d/17733.bugfix diff --git a/changelog.d/17733.bugfix b/changelog.d/17733.bugfix new file mode 100644 index 0000000000..64c6e90d87 --- /dev/null +++ b/changelog.d/17733.bugfix @@ -0,0 +1 @@ +Fix a bug in SSS which could prevent /sync from working for certain user accounts. 
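The fix is a guard applied before any room metadata is loaded. A minimal sketch
of that guard (`RoomEntry` is a stand-in for Synapse's `RoomsForUserSlidingSync`;
only the `room_version_id` check mirrors the real code below):

```python
from dataclasses import dataclass
from typing import List

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS


@dataclass
class RoomEntry:
    room_id: str
    room_version_id: str


def known_version_rooms(rooms: List[RoomEntry]) -> List[RoomEntry]:
    # Rooms with an unrecognised version are dropped up front, since their
    # stored metadata may be in a broken state and they shouldn't go down
    # sync anyway.
    return [r for r in rooms if r.room_version_id in KNOWN_ROOM_VERSIONS]
```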
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index a423de74bf..353c491f72 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -40,6 +40,7 @@ from synapse.api.constants import ( EventTypes, Membership, ) +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import StrippedStateEvent from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import start_active_span, trace @@ -952,6 +953,15 @@ class SlidingSyncRoomLists: excluded_rooms=self.rooms_to_exclude_globally, ) + # We filter out unknown room versions before we try and load any + # metadata about the room. They shouldn't go down sync anyway, and their + # metadata may be in a broken state. + room_for_user_list = [ + room_for_user + for room_for_user in room_for_user_list + if room_for_user.room_version_id in KNOWN_ROOM_VERSIONS + ] + # Remove invites from ignored users ignored_users = await self.store.ignored_users(user_id) if ignored_users: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e321a1add2..ded7948713 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -41,6 +41,7 @@ import attr from synapse.api.constants import EventTypes, Membership from synapse.api.errors import Codes, SynapseError +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.logging.opentracing import trace from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -1443,6 +1444,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): is_encrypted=bool(row[9]), ) for row in txn + # We filter out unknown room versions proactively. They + # shouldn't go down sync and their metadata may be in a broken + # state (causing errors). + if row[4] in KNOWN_ROOM_VERSIONS } return await self.db_pool.runInteraction( diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index 8d6039715f..c619dd83fb 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -1198,3 +1198,55 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): joined_dm_room_id: True, }, ) + + def test_old_room_with_unknown_room_version(self) -> None: + """Test that an old room with unknown room version does not break + sync.""" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # We first create a standard room, then we'll change the room version in + # the DB. + room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Poke the database and update the room version to an unknown one. + self.get_success( + self.hs.get_datastores().main.db_pool.simple_update( + "rooms", + keyvalues={"room_id": room_id}, + updatevalues={"room_version": "unknown-room-version"}, + desc="updated-room-version", + ) + ) + + # Invalidate method so that it returns the currently updated version + # instead of the cached version. + self.hs.get_datastores().main.get_room_version_id.invalidate((room_id,)) + + # For old unknown room versions we won't have an entry in this table + # (due to us skipping unknown room versions in the background update). 
+ self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + desc="delete_sliding_room", + ) + ) + + # Also invalidate some caches to ensure we pull things from the DB. + self.store._events_stream_cache._entity_to_key.pop(room_id) + self.store._get_max_event_pos.invalidate((room_id,)) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) From c2e5e9e67c24264f5a12bf3ceaa9c4e195547d26 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 19 Sep 2024 10:07:18 -0500 Subject: [PATCH 262/332] Sliding Sync: Avoid fetching left rooms and add back `newly_left` rooms (#17725) Performance optimization: We can avoid fetching rooms that the user has left themselves (which could be a significant amount), then only add back rooms that the user has `newly_left` (left in the token range of an incremental sync). It's a lot faster to fetch less rooms than fetch them all and throw them away in most cases. Since the user only leaves a room (or is state reset out) once in a blue moon, we can avoid a lot of work. Based on @erikjohnston's branch, erikj/ss_perf --------- Co-authored-by: Erik Johnston --- changelog.d/17725.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 26 +- synapse/handlers/sliding_sync/room_lists.py | 279 ++++++---- synapse/storage/databases/main/roommember.py | 46 +- synapse/storage/databases/main/state.py | 20 +- synapse/storage/databases/main/stream.py | 7 + .../client/sliding_sync/test_sliding_sync.py | 479 +++++++++++++++++- tests/storage/test_stream.py | 85 +++- 8 files changed, 833 insertions(+), 110 deletions(-) create mode 100644 changelog.d/17725.misc diff --git a/changelog.d/17725.misc b/changelog.d/17725.misc new file mode 100644 index 0000000000..2a53bb1491 --- /dev/null +++ b/changelog.d/17725.misc @@ -0,0 +1 @@ +More efficiently fetch rooms for Sliding Sync. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 4010f28607..5206af22ec 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -495,6 +495,24 @@ class SlidingSyncHandler: room_sync_config.timeline_limit, ) + # Handle state resets. For example, if we see + # `room_membership_for_user_at_to_token.event_id=None and + # room_membership_for_user_at_to_token.membership is not None`, we should + # indicate to the client that a state reset happened. Perhaps we should indicate + # this by setting `initial: True` and empty `required_state: []`. + state_reset_out_of_room = False + if ( + room_membership_for_user_at_to_token.event_id is None + and room_membership_for_user_at_to_token.membership is not None + ): + # We only expect the `event_id` to be `None` if you've been state reset out + # of the room (meaning you're no longer in the room). We could put this as + # part of the if-statement above but we want to handle every case where + # `event_id` is `None`. + assert room_membership_for_user_at_to_token.membership is Membership.LEAVE + + state_reset_out_of_room = True + # Determine whether we should limit the timeline to the token range. 
# # We should return historical messages (before token range) in the @@ -527,7 +545,7 @@ class SlidingSyncHandler: from_bound = None initial = True ignore_timeline_bound = False - if from_token and not newly_joined: + if from_token and not newly_joined and not state_reset_out_of_room: room_status = previous_connection_state.rooms.have_sent_room(room_id) if room_status.status == HaveSentRoomFlag.LIVE: from_bound = from_token.stream_token.room_key @@ -732,12 +750,6 @@ class SlidingSyncHandler: stripped_state.append(strip_event(invite_or_knock_event)) - # TODO: Handle state resets. For example, if we see - # `room_membership_for_user_at_to_token.event_id=None and - # room_membership_for_user_at_to_token.membership is not None`, we should - # indicate to the client that a state reset happened. Perhaps we should indicate - # this by setting `initial: True` and empty `required_state`. - # Get the changes to current state in the token range from the # `current_state_delta_stream` table. # diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 353c491f72..bf19eb735b 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -56,7 +56,6 @@ from synapse.storage.roommember import ( ) from synapse.types import ( MutableStateMap, - PersistedEventPosition, RoomStreamToken, StateMap, StrCollection, @@ -81,6 +80,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup and subsequent type narrowing. + UNSET_SENTINEL = object() + + # Helper definition for the types that we might return. We do this to avoid # copying data between types (which can be expensive for many rooms). RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync] @@ -119,12 +124,6 @@ class SlidingSyncInterestedRooms: dm_room_ids: AbstractSet[str] -class Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup and subsequent type narrowing. - UNSET_SENTINEL = object() - - def filter_membership_for_sync( *, user_id: str, @@ -221,6 +220,9 @@ class SlidingSyncRoomLists: # include rooms that are outside the list ranges. all_rooms: Set[str] = set() + # Note: this won't include rooms the user has left themselves. We add back + # `newly_left` rooms below. This is more efficient than fetching all rooms and + # then filtering out the old left rooms. room_membership_for_user_map = await self.store.get_sliding_sync_rooms_for_user( user_id ) @@ -262,36 +264,11 @@ class SlidingSyncRoomLists: event_id=change.event_id, event_pos=change.event_pos, room_version_id=change.room_version_id, - # We keep the current state of the room though + # We keep the state of the room though has_known_state=existing_room.has_known_state, room_type=existing_room.room_type, is_encrypted=existing_room.is_encrypted, ) - else: - # This can happen if we get "state reset" out of the room - # after the `to_token`. In other words, there is no membership - # for the room after the `to_token` but we see membership in - # the token range. - - # Get the state at the time. 
Note that room type never changes, - # so we can just get current room type - room_type = await self.store.get_room_type(room_id) - is_encrypted = await self.get_is_encrypted_for_room_at_token( - room_id, to_token.room_key - ) - - # Add back rooms that the user was state-reset out of after `to_token` - room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( - room_id=room_id, - sender=change.sender, - membership=change.membership, - event_id=change.event_id, - event_pos=change.event_pos, - room_version_id=change.room_version_id, - has_known_state=True, - room_type=room_type, - is_encrypted=is_encrypted, - ) ( newly_joined_room_ids, @@ -301,44 +278,88 @@ class SlidingSyncRoomLists: ) dm_room_ids = await self._get_dm_rooms_for_user(user_id) - # Handle state resets in the from -> to token range. - state_reset_rooms = ( + # Add back `newly_left` rooms (rooms left in the from -> to token range). + # + # We do this because `get_sliding_sync_rooms_for_user(...)` doesn't include + # rooms that the user left themselves as it's more efficient to add them back + # here than to fetch all rooms and then filter out the old left rooms. The user + # only leaves a room once in a blue moon so this barely needs to run. + # + missing_newly_left_rooms = ( newly_left_room_map.keys() - room_membership_for_user_map.keys() ) - if state_reset_rooms: + if missing_newly_left_rooms: + # TODO: It would be nice to avoid these copies room_membership_for_user_map = dict(room_membership_for_user_map) - for room_id in ( - newly_left_room_map.keys() - room_membership_for_user_map.keys() - ): - # Get the state at the time. Note that room type never changes, - # so we can just get current room type - room_type = await self.store.get_room_type(room_id) - is_encrypted = await self.get_is_encrypted_for_room_at_token( - room_id, newly_left_room_map[room_id].to_room_stream_token() - ) + for room_id in missing_newly_left_rooms: + newly_left_room_for_user = newly_left_room_map[room_id] + # This should be a given + assert newly_left_room_for_user.membership == Membership.LEAVE - room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( - room_id=room_id, - sender=None, - membership=Membership.LEAVE, - event_id=None, - event_pos=newly_left_room_map[room_id], - room_version_id=await self.store.get_room_version_id(room_id), - has_known_state=True, - room_type=room_type, - is_encrypted=is_encrypted, + # Add back `newly_left` rooms + # + # Check for membership and state in the Sliding Sync tables as it's just + # another membership + newly_left_room_for_user_sliding_sync = ( + await self.store.get_sliding_sync_room_for_user(user_id, room_id) ) + # If the membership exists, it's just a normal user left the room on + # their own + if newly_left_room_for_user_sliding_sync is not None: + room_membership_for_user_map[room_id] = ( + newly_left_room_for_user_sliding_sync + ) + + change = changes.get(room_id) + if change is not None: + # Update room membership events to the point in time of the `to_token` + room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( + room_id=room_id, + sender=change.sender, + membership=change.membership, + event_id=change.event_id, + event_pos=change.event_pos, + room_version_id=change.room_version_id, + # We keep the state of the room though + has_known_state=newly_left_room_for_user_sliding_sync.has_known_state, + room_type=newly_left_room_for_user_sliding_sync.room_type, + is_encrypted=newly_left_room_for_user_sliding_sync.is_encrypted, + ) + + # If we are `newly_left` from the room but 
can't find any membership, + # then we have been "state reset" out of the room + else: + # Get the state at the time. We can't read from the Sliding Sync + # tables because the user has no membership in the room according to + # the state (thanks to the state reset). + # + # Note: `room_type` never changes, so we can just get current room + # type + room_type = await self.store.get_room_type(room_id) + has_known_state = room_type is not ROOM_UNKNOWN_SENTINEL + if isinstance(room_type, StateSentinel): + room_type = None + + # Get the encryption status at the time of the token + is_encrypted = await self.get_is_encrypted_for_room_at_token( + room_id, + newly_left_room_for_user.event_pos.to_room_stream_token(), + ) + + room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( + room_id=room_id, + sender=newly_left_room_for_user.sender, + membership=newly_left_room_for_user.membership, + event_id=newly_left_room_for_user.event_id, + event_pos=newly_left_room_for_user.event_pos, + room_version_id=newly_left_room_for_user.room_version_id, + has_known_state=has_known_state, + room_type=room_type, + is_encrypted=is_encrypted, + ) if sync_config.lists: - sync_room_map = { - room_id: room_membership_for_user - for room_id, room_membership_for_user in room_membership_for_user_map.items() - if filter_membership_for_sync( - user_id=user_id, - room_membership_for_user=room_membership_for_user, - newly_left=room_id in newly_left_room_map, - ) - } + sync_room_map = room_membership_for_user_map with start_active_span("assemble_sliding_window_lists"): for list_key, list_config in sync_config.lists.items(): # Apply filters @@ -347,6 +368,7 @@ class SlidingSyncRoomLists: filtered_sync_room_map = await self.filter_rooms_using_tables( user_id, sync_room_map, + previous_connection_state, list_config.filters, to_token, dm_room_ids, @@ -446,6 +468,9 @@ class SlidingSyncRoomLists: if sync_config.room_subscriptions: with start_active_span("assemble_room_subscriptions"): + # TODO: It would be nice to avoid these copies + room_membership_for_user_map = dict(room_membership_for_user_map) + # Find which rooms are partially stated and may need to be filtered out # depending on the `required_state` requested (see below). partial_state_rooms = await self.store.get_partial_rooms() @@ -454,10 +479,20 @@ class SlidingSyncRoomLists: room_id, room_subscription, ) in sync_config.room_subscriptions.items(): - if room_id not in room_membership_for_user_map: + # Check if we have a membership for the room, but didn't pull it out + # above. This could be e.g. a leave that we don't pull out by + # default. + current_room_entry = ( + await self.store.get_sliding_sync_room_for_user( + user_id, room_id + ) + ) + if not current_room_entry: # TODO: Handle rooms the user isn't in. continue + room_membership_for_user_map[room_id] = current_room_entry + all_rooms.add(room_id) # Take the superset of the `RoomSyncConfig` for each room. @@ -471,8 +506,6 @@ class SlidingSyncRoomLists: if room_id in partial_state_rooms: continue - all_rooms.add(room_id) - # Update our `relevant_room_map` with the room we're going to display # and need to fetch more info about. 
existing_room_sync_config = relevant_room_map.get(room_id) @@ -487,7 +520,7 @@ class SlidingSyncRoomLists: # Filtered subset of `relevant_room_map` for rooms that may have updates # (in the event stream) - relevant_rooms_to_send_map = await self._filter_relevant_room_to_send( + relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send( previous_connection_state, from_token, relevant_room_map ) @@ -544,6 +577,7 @@ class SlidingSyncRoomLists: filtered_sync_room_map = await self.filter_rooms( sync_config.user, sync_room_map, + previous_connection_state, list_config.filters, to_token, dm_room_ids, @@ -674,7 +708,7 @@ class SlidingSyncRoomLists: # Filtered subset of `relevant_room_map` for rooms that may have updates # (in the event stream) - relevant_rooms_to_send_map = await self._filter_relevant_room_to_send( + relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send( previous_connection_state, from_token, relevant_room_map ) @@ -689,7 +723,7 @@ class SlidingSyncRoomLists: dm_room_ids=dm_room_ids, ) - async def _filter_relevant_room_to_send( + async def _filter_relevant_rooms_to_send( self, previous_connection_state: PerConnectionState, from_token: Optional[StreamToken], @@ -974,8 +1008,17 @@ class SlidingSyncRoomLists: ) ] - # If the user has never joined any rooms before, we can just return an empty list - if not room_for_user_list: + ( + newly_joined_room_ids, + newly_left_room_map, + ) = await self._get_newly_joined_and_left_rooms( + user_id, to_token=to_token, from_token=from_token + ) + + # If the user has never joined any rooms before, we can just return an empty + # list. We also have to check the `newly_left_room_map` in case someone was + # state reset out of all of the rooms they were in. + if not room_for_user_list and not newly_left_room_map: return {}, set(), set() # Since we fetched the users room list at some point in time after the @@ -993,30 +1036,22 @@ class SlidingSyncRoomLists: else: rooms_for_user[room_id] = change_room_for_user - ( - newly_joined_room_ids, - newly_left_room_ids, - ) = await self._get_newly_joined_and_left_rooms( - user_id, to_token=to_token, from_token=from_token - ) - # Ensure we have entries for rooms that the user has been "state reset" # out of. These are rooms appear in the `newly_left_rooms` map but # aren't in the `rooms_for_user` map. 
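# A concrete (if simplified) illustration of the detection described in the
# comment above: the state-reset candidates are exactly the set difference
# between the newly-left map and the membership map. This is an editorial
# sketch with plain dicts standing in for Synapse's storage types; the helper
# name and the sample room IDs are illustrative, not part of the patch.

def state_reset_room_ids(rooms_for_user: dict, newly_left_room_map: dict) -> set:
    # Rooms where we saw a leave transition in the token range but for which
    # no membership entry survives for the user.
    return newly_left_room_map.keys() - rooms_for_user.keys()

# The user still has a membership in !a, but !b only appears as newly left,
# so !b is the room they were state reset out of.
assert state_reset_room_ids(
    {"!a:example.org": "join"},
    {"!a:example.org": "left", "!b:example.org": "left"},
) == {"!b:example.org"}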
- for room_id, left_event_pos in newly_left_room_ids.items(): + for room_id, newly_left_room_for_user in newly_left_room_map.items(): + # If we already know about the room, it's not a state reset if room_id in rooms_for_user: continue - rooms_for_user[room_id] = RoomsForUserStateReset( - room_id=room_id, - event_id=None, - event_pos=left_event_pos, - membership=Membership.LEAVE, - sender=None, - room_version_id=await self.store.get_room_version_id(room_id), - ) + # This should be true if it's a state reset + assert newly_left_room_for_user.membership is Membership.LEAVE + assert newly_left_room_for_user.event_id is None + assert newly_left_room_for_user.sender is None - return rooms_for_user, newly_joined_room_ids, set(newly_left_room_ids) + rooms_for_user[room_id] = newly_left_room_for_user + + return rooms_for_user, newly_joined_room_ids, set(newly_left_room_map) @trace async def _get_newly_joined_and_left_rooms( @@ -1024,7 +1059,7 @@ class SlidingSyncRoomLists: user_id: str, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Tuple[AbstractSet[str], Mapping[str, PersistedEventPosition]]: + ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: """Fetch the sets of rooms that the user newly joined or left in the given token range. @@ -1033,11 +1068,18 @@ class SlidingSyncRoomLists: "current memberships" of the user. Returns: - A 2-tuple of newly joined room IDs and a map of newly left room - IDs to the event position the leave happened at. + A 2-tuple of newly joined room IDs and a map of newly_left room + IDs to the `RoomsForUserStateReset` entry. + + We're using `RoomsForUserStateReset` but that doesn't necessarily mean the + user was state reset of the rooms. It's just that the `event_id`/`sender` + are optional and we can't tell the difference between the server leaving the + room when the user was the last person participating in the room and left or + was state reset out of the room. To actually check for a state reset, you + need to check if a membership still exists in the room. """ newly_joined_room_ids: Set[str] = set() - newly_left_room_map: Dict[str, PersistedEventPosition] = {} + newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} # We need to figure out the # @@ -1108,8 +1150,13 @@ class SlidingSyncRoomLists: # 1) Figure out newly_left rooms (> `from_token` and <= `to_token`). if last_membership_change_in_from_to_range.membership == Membership.LEAVE: # 1) Mark this room as `newly_left` - newly_left_room_map[room_id] = ( - last_membership_change_in_from_to_range.event_pos + newly_left_room_map[room_id] = RoomsForUserStateReset( + room_id=room_id, + sender=last_membership_change_in_from_to_range.sender, + membership=Membership.LEAVE, + event_id=last_membership_change_in_from_to_range.event_id, + event_pos=last_membership_change_in_from_to_range.event_pos, + room_version_id=await self.store.get_room_version_id(room_id), ) # 2) Figure out `newly_joined` @@ -1553,6 +1600,7 @@ class SlidingSyncRoomLists: self, user: UserID, sync_room_map: Dict[str, RoomsForUserType], + previous_connection_state: PerConnectionState, filters: SlidingSyncConfig.SlidingSyncList.Filters, to_token: StreamToken, dm_room_ids: AbstractSet[str], @@ -1738,14 +1786,33 @@ class SlidingSyncRoomLists: ) } + # Keep rooms if the user has been state reset out of it but we previously sent + # down the connection before. We want to make sure that we send these down to + # the client regardless of filters so they find out about the state reset. 
+ # + # We don't always have access to the state in a room after being state reset if + # no one else locally on the server is participating in the room so we patch + # these back in manually. + state_reset_out_of_room_id_set = { + room_id + for room_id in sync_room_map.keys() + if sync_room_map[room_id].event_id is None + and previous_connection_state.rooms.have_sent_room(room_id).status + != HaveSentRoomFlag.NEVER + } + # Assemble a new sync room map but only with the `filtered_room_id_set` - return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} + return { + room_id: sync_room_map[room_id] + for room_id in filtered_room_id_set | state_reset_out_of_room_id_set + } @trace async def filter_rooms_using_tables( self, user_id: str, sync_room_map: Mapping[str, RoomsForUserSlidingSync], + previous_connection_state: PerConnectionState, filters: SlidingSyncConfig.SlidingSyncList.Filters, to_token: StreamToken, dm_room_ids: AbstractSet[str], @@ -1887,8 +1954,26 @@ class SlidingSyncRoomLists: ) } + # Keep rooms if the user has been state reset out of it but we previously sent + # down the connection before. We want to make sure that we send these down to + # the client regardless of filters so they find out about the state reset. + # + # We don't always have access to the state in a room after being state reset if + # no one else locally on the server is participating in the room so we patch + # these back in manually. + state_reset_out_of_room_id_set = { + room_id + for room_id in sync_room_map.keys() + if sync_room_map[room_id].event_id is None + and previous_connection_state.rooms.have_sent_room(room_id).status + != HaveSentRoomFlag.NEVER + } + # Assemble a new sync room map but only with the `filtered_room_id_set` - return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set} + return { + room_id: sync_room_map[room_id] + for room_id in filtered_room_id_set | state_reset_out_of_room_id_set + } @trace async def sort_rooms( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ded7948713..0a62613d34 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1404,7 +1404,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) -> Mapping[str, RoomsForUserSlidingSync]: """Get all the rooms for a user to handle a sliding sync request. - Ignores forgotten rooms and rooms that the user has been kicked from. + Ignores forgotten rooms and rooms that the user has left themselves. Returns: Map from room ID to membership info @@ -1429,6 +1429,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') WHERE user_id = ? 
                AND m.forgotten = 0
+                AND (m.membership != 'leave' OR m.user_id != m.sender)
             """
             txn.execute(sql, (user_id,))
             return {
@@ -1455,6 +1456,49 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             get_sliding_sync_rooms_for_user_txn,
         )
 
+    async def get_sliding_sync_room_for_user(
+        self, user_id: str, room_id: str
+    ) -> Optional[RoomsForUserSlidingSync]:
+        """Get the sliding sync room entry for the given user and room."""
+
+        def get_sliding_sync_room_for_user_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[RoomsForUserSlidingSync]:
+            sql = """
+                SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
+                    r.room_version,
+                    m.event_instance_name, m.event_stream_ordering,
+                    m.has_known_state,
+                    COALESCE(j.room_type, m.room_type),
+                    COALESCE(j.is_encrypted, m.is_encrypted)
+                FROM sliding_sync_membership_snapshots AS m
+                INNER JOIN rooms AS r USING (room_id)
+                LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join')
+                WHERE user_id = ?
+                AND m.forgotten = 0
+                AND m.room_id = ?
+            """
+            txn.execute(sql, (user_id, room_id))
+            row = txn.fetchone()
+            if not row:
+                return None
+
+            return RoomsForUserSlidingSync(
+                room_id=row[0],
+                sender=row[1],
+                membership=row[2],
+                event_id=row[3],
+                room_version_id=row[4],
+                event_pos=PersistedEventPosition(row[5], row[6]),
+                has_known_state=bool(row[7]),
+                room_type=row[8],
+                is_encrypted=row[9],
+            )
+
+        return await self.db_pool.runInteraction(
+            "get_sliding_sync_room_for_user", get_sliding_sync_room_for_user_txn
+        )
+
 
 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index ca31122ad3..60312d770d 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -308,8 +308,24 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         return create_event
 
     @cached(max_entries=10000)
-    async def get_room_type(self, room_id: str) -> Optional[str]:
-        raise NotImplementedError()
+    async def get_room_type(self, room_id: str) -> Union[Optional[str], Sentinel]:
+        """Fetch room type for given room.
+
+        Since this function is cached, any missing values would be cached as
+        `None`. In order to distinguish between a room whose room type is
+        genuinely `None` and a room that is unknown to the server, where we
+        might want to omit the value (which would also make it cached as
+        `None`), we instead use the sentinel value `ROOM_UNKNOWN_SENTINEL`.
+        """
+
+        try:
+            create_event = await self.get_create_event_for_room(room_id)
+            return create_event.content.get(EventContentFields.ROOM_TYPE)
+        except NotFoundError:
+            # We use the sentinel value to distinguish between `None` which is a
+            # valid room type and a room that is unknown to the server so the value
+            # is just unset.
+            return ROOM_UNKNOWN_SENTINEL
 
     @cachedList(cached_method_name="get_room_type", list_name="room_ids")
     async def bulk_get_room_type(
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 0ab7cb8dbd..964f41ca57 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -941,6 +941,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Returns:
             All membership changes to the current state in the token range. Events are
             sorted by `stream_ordering` ascending.
+ + `event_id`/`sender` can be `None` when the server leaves a room (meaning + everyone locally left) or a state reset which removed the person from the + room. We can't tell the difference between the two cases with what's + available in the `current_state_delta_stream` table. To actually check for a + state reset, you need to check if a membership still exists in the room. """ # Start by ruling out cases where a DB query is not necessary. if from_key == to_key: @@ -1052,6 +1058,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): membership=( membership if membership is not None else Membership.LEAVE ), + # This will also be null for the same reasons if `s.event_id = null` sender=sender, # Prev event prev_event_id=prev_event_id, diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index c2cfb29866..ea3ca57957 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -15,7 +15,7 @@ import logging from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple from unittest.mock import AsyncMock -from parameterized import parameterized_class +from parameterized import parameterized, parameterized_class from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -23,12 +23,16 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( AccountDataTypes, + EventContentFields, EventTypes, + JoinRules, Membership, + RoomTypes, ) from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict from synapse.events.snapshot import EventContext +from synapse.handlers.sliding_sync import StateValues from synapse.rest.client import account_data, devices, login, receipts, room, sync from synapse.server import HomeServer from synapse.types import ( @@ -43,6 +47,7 @@ from synapse.util.stringutils import random_string from tests import unittest from tests.server import TimedOutException +from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) @@ -421,6 +426,9 @@ class SlidingSyncTestCase(SlidingSyncBase): self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() self.account_data_handler = hs.get_account_data_handler() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence super().prepare(reactor, clock, hs) @@ -988,3 +996,472 @@ class SlidingSyncTestCase(SlidingSyncBase): # Make the Sliding Sync request response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + def test_state_reset_room_comes_down_incremental_sync(self) -> None: + """Test that a room that we were state reset out of comes down + incremental sync""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + is_public=True, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(room_id1, "test", tok=user2_tok) + event_id = event_response["event_id"] + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + 
sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we see room1 + self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # FIXME: We're manually busting the cache since + # https://github.com/element-hq/synapse/issues/17368 is not solved yet + self.store._membership_stream_cache.entity_has_changed( + user1_id, join_rule_event_pos.stream + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) + + state_map_at_reset = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Update the state after user1 was state reset out of the room + self.helper.send_state( + room_id1, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper room"}, + tok=user2_tok, + ) + + # Make another Sliding Sync request (incremental) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Expect to see room1 because it is `newly_left` thanks to being state reset out + # of it since the last time we synced. We need to let the client know that + # something happened and that they are no longer in the room. + self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True) + # We set `initial=True` to indicate that the client should reset the state they + # have about the room + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + # They shouldn't see anything past the state reset + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + # We should see all the state events in the room + state_map_at_reset.values(), + exact=True, + ) + # The position where the state reset happened + self.assertEqual( + response_body["rooms"][room_id1]["bump_stamp"], + join_rule_event_pos.stream, + response_body["rooms"][room_id1], + ) + + # Other non-important things. We just want to check what these are so we know + # what happens in a state reset scenario. + # + # Room name was set at the time of the state reset so we should still be able to + # see it. 
+ self.assertEqual(response_body["rooms"][room_id1]["name"], "my super room") + # Could be set but there is no avatar for this room + self.assertIsNone( + response_body["rooms"][room_id1].get("avatar"), + response_body["rooms"][room_id1], + ) + # Could be set but this room isn't marked as a DM + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + response_body["rooms"][room_id1], + ) + # Empty timeline because we are not in the room at all (they are all being + # filtered out) + self.assertIsNone( + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], + ) + # `limited` since we're not providing any timeline events but there are some in + # the room. + self.assertEqual(response_body["rooms"][room_id1]["limited"], True) + # User is no longer in the room so they can't see this info + self.assertIsNone( + response_body["rooms"][room_id1].get("joined_count"), + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("invited_count"), + response_body["rooms"][room_id1], + ) + + def test_state_reset_previously_room_comes_down_incremental_sync_with_filters( + self, + ) -> None: + """ + Test that a room that we were state reset out of should always be sent down + regardless of the filters if it has been sent down the connection before. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + "name": "my super space", + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(space_room_id, "test", tok=user2_tok) + event_id = event_response["event_id"] + + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we see room1 + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id}, exact=True + ) + self.assertEqual(response_body["rooms"][space_room_id]["initial"], True) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=space_room_id, + room_version=self.get_success( + self.store.get_room_version_id(space_room_id) + ), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # FIXME: We're manually busting the cache since + # https://github.com/element-hq/synapse/issues/17368 is not solved yet + self.store._membership_stream_cache.entity_has_changed( + user1_id, join_rule_event_pos.stream + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(space_room_id)) + self.assertIncludes(set(users_in_room), {user2_id}, 
exact=True) + + state_map_at_reset = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + + # Update the state after user1 was state reset out of the room + self.helper.send_state( + space_room_id, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper space"}, + tok=user2_tok, + ) + + # User2 also leaves the room so the server is no longer participating in the room + # and we don't have access to current state + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make another Sliding Sync request (incremental) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Expect to see room1 because it is `newly_left` thanks to being state reset out + # of it since the last time we synced. We need to let the client know that + # something happened and that they are no longer in the room. + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id}, exact=True + ) + # We set `initial=True` to indicate that the client should reset the state they + # have about the room + self.assertEqual(response_body["rooms"][space_room_id]["initial"], True) + # They shouldn't see anything past the state reset + self._assertRequiredStateIncludes( + response_body["rooms"][space_room_id]["required_state"], + # We should see all the state events in the room + state_map_at_reset.values(), + exact=True, + ) + # The position where the state reset happened + self.assertEqual( + response_body["rooms"][space_room_id]["bump_stamp"], + join_rule_event_pos.stream, + response_body["rooms"][space_room_id], + ) + + # Other non-important things. We just want to check what these are so we know + # what happens in a state reset scenario. + # + # Room name was set at the time of the state reset so we should still be able to + # see it. + self.assertEqual( + response_body["rooms"][space_room_id]["name"], "my super space" + ) + # Could be set but there is no avatar for this room + self.assertIsNone( + response_body["rooms"][space_room_id].get("avatar"), + response_body["rooms"][space_room_id], + ) + # Could be set but this room isn't marked as a DM + self.assertIsNone( + response_body["rooms"][space_room_id].get("is_dm"), + response_body["rooms"][space_room_id], + ) + # Empty timeline because we are not in the room at all (they are all being + # filtered out) + self.assertIsNone( + response_body["rooms"][space_room_id].get("timeline"), + response_body["rooms"][space_room_id], + ) + # `limited` since we're not providing any timeline events but there are some in + # the room. + self.assertEqual(response_body["rooms"][space_room_id]["limited"], True) + # User is no longer in the room so they can't see this info + self.assertIsNone( + response_body["rooms"][space_room_id].get("joined_count"), + response_body["rooms"][space_room_id], + ) + self.assertIsNone( + response_body["rooms"][space_room_id].get("invited_count"), + response_body["rooms"][space_room_id], + ) + + @parameterized.expand( + [ + ("server_leaves_room", True), + ("server_participating_in_room", False), + ] + ) + def test_state_reset_never_room_incremental_sync_with_filters( + self, test_description: str, server_leaves_room: bool + ) -> None: + """ + Test that a room that we were state reset out of should be sent down if we can + figure out the state or if it was sent down the connection before. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + "name": "my super space", + }, + ) + + # Create another space room + space_room_id2 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(space_room_id, "test", tok=user2_tok) + event_id = event_response["event_id"] + + # User1 joins the rooms + # + self.helper.join(space_room_id, user1_id, tok=user1_tok) + # Join space_room_id2 so that it is at the top of the list + self.helper.join(space_room_id2, user1_id, tok=user1_tok) + + # Make a SS request for only the top room. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we only see space_room_id2 + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id2}, exact=True + ) + self.assertEqual(response_body["rooms"][space_room_id2]["initial"], True) + + # Just create some activity in space_room_id2 so it appears when we incremental sync again + self.helper.send(space_room_id2, "test", tok=user2_tok) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=space_room_id, + room_version=self.get_success( + self.store.get_room_version_id(space_room_id) + ), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # FIXME: We're manually busting the cache since + # https://github.com/element-hq/synapse/issues/17368 is not solved yet + self.store._membership_stream_cache.entity_has_changed( + user1_id, join_rule_event_pos.stream + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(space_room_id)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) + + # Update the state after user1 was state reset out of the room. + # This will also bump it to the top of the list. 
+ self.helper.send_state( + space_room_id, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper space"}, + tok=user2_tok, + ) + + if server_leaves_room: + # User2 also leaves the room so the server is no longer participating in the room + # and we don't have access to current state + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make another Sliding Sync request (incremental) + sync_body = { + "lists": { + "foo-list": { + # Expand the range to include all rooms + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + if self.use_new_tables: + if server_leaves_room: + # We still only expect to see space_room_id2 because even though we were state + # reset out of space_room_id, it was never sent down the connection before so we + # don't need to bother the client with it. + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id2}, exact=True + ) + else: + # Both rooms show up because we can figure out the state for the + # `filters.room_types` if someone is still in the room (we look at the + # current state because `room_type` never changes). + self.assertIncludes( + set(response_body["rooms"].keys()), + {space_room_id, space_room_id2}, + exact=True, + ) + else: + # Both rooms show up because we can actually take the time to figure out the + # state for the `filters.room_types` in the fallback path (we look at + # historical state for `LEAVE` membership). + self.assertIncludes( + set(response_body["rooms"].keys()), + {space_room_id, space_room_id2}, + exact=True, + ) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 837eb434aa..ed5f286243 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -27,7 +27,13 @@ from immutabledict import immutabledict from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes +from synapse.api.constants import ( + Direction, + EventTypes, + JoinRules, + Membership, + RelationTypes, +) from synapse.api.filtering import Filter from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import FrozenEventV3 @@ -1154,7 +1160,7 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase): room_id=room_id1, event_id=None, event_pos=dummy_state_pos, - membership="leave", + membership=Membership.LEAVE, sender=None, # user1_id, prev_event_id=join_response1["event_id"], prev_event_pos=join_pos1, @@ -1164,6 +1170,81 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase): ], ) + def test_state_reset2(self) -> None: + """ + Test a state reset scenario where the user gets removed from the room (when + there is no corresponding leave event) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, is_public=True, tok=user2_tok) + + event_response = self.helper.send(room_id1, "test", tok=user2_tok) + event_id = event_response["event_id"] + + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_join_pos = 
self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + before_reset_token = self.event_sources.get_current_token() + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # FIXME: We're manually busting the cache since + # https://github.com/element-hq/synapse/issues/17368 is not solved yet + self.store._membership_stream_cache.entity_has_changed( + user1_id, join_rule_event_pos.stream + ) + + after_reset_token = self.event_sources.get_current_token() + + membership_changes = self.get_success( + self.store.get_current_state_delta_membership_changes_for_user( + user1_id, + from_key=before_reset_token.room_key, + to_key=after_reset_token.room_key, + ) + ) + + # Let the whole diff show on failure + self.maxDiff = None + self.assertEqual( + membership_changes, + [ + CurrentStateDeltaMembership( + room_id=room_id1, + event_id=None, + # The position where the state reset happened + event_pos=join_rule_event_pos, + membership=Membership.LEAVE, + sender=None, + prev_event_id=user1_join_response["event_id"], + prev_event_pos=user1_join_pos, + prev_membership="join", + prev_sender=user1_id, + ), + ], + ) + def test_excluded_room_ids(self) -> None: """ Test that the `excluded_room_ids` option excludes changes from the specified rooms. From a851f6b237ad7b4488d2d80c0b0777436d24da6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Sep 2024 21:51:51 +0300 Subject: [PATCH 263/332] Sliding sync: Add connection tracking to the `account_data` extension (#17695) This is basically exactly the same logic as for receipts. Essentially we just need to track which room account data we have and haven't sent down to clients, and use that when we pull stuff out. I think this just needs a couple of extra tests written --------- Co-authored-by: Eric Eastwood --- changelog.d/17695.bugfix | 1 + synapse/handlers/sliding_sync/extensions.py | 183 +++++-- .../storage/databases/main/account_data.py | 50 ++ .../storage/databases/main/sliding_sync.py | 37 ++ synapse/storage/databases/main/tags.py | 46 ++ synapse/types/handlers/sliding_sync.py | 7 +- .../test_extension_account_data.py | 497 +++++++++++++++++- 7 files changed, 748 insertions(+), 73 deletions(-) create mode 100644 changelog.d/17695.bugfix diff --git a/changelog.d/17695.bugfix b/changelog.d/17695.bugfix new file mode 100644 index 0000000000..c63132704f --- /dev/null +++ b/changelog.d/17695.bugfix @@ -0,0 +1 @@ +Fix bug where room account data would not correctly be sent down sliding sync for old rooms. 
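The tracking scheme the commit message describes boils down to a three-way split: for each relevant room, the connection has either never seen it, saw it previously up to some stream token, or is fully live. The sketch below is a simplified, editorial stand-in (the `SentFlag` enum and `partition_rooms` helper are illustrative names, not Synapse's actual `HaveSentRoomFlag` or handler code) showing how that split decides how much account data to fetch per room:

from enum import Enum, auto
from typing import Dict, Optional, Set, Tuple


class SentFlag(Enum):
    NEVER = auto()       # never sent this room down this connection
    PREVIOUSLY = auto()  # sent before, then it fell out of range at some token
    LIVE = auto()        # up to date as of the last sync


def partition_rooms(
    relevant_room_ids: Set[str],
    statuses: Dict[str, Tuple[SentFlag, Optional[int]]],
    have_from_token: bool,
) -> Tuple[Set[str], Dict[str, int], Set[str]]:
    """Split rooms into live / previously-sent / never-sent buckets."""
    live: Set[str] = set()
    previously: Dict[str, int] = {}
    initial: Set[str] = set()
    for room_id in relevant_room_ids:
        if not have_from_token:
            # Initial sync: everything needs a full fetch.
            initial.add(room_id)
            continue
        flag, last_token = statuses.get(room_id, (SentFlag.NEVER, None))
        if flag is SentFlag.LIVE:
            live.add(room_id)  # only need deltas since the request's from_token
        elif flag is SentFlag.PREVIOUSLY:
            assert last_token is not None
            previously[room_id] = last_token  # deltas since it fell out of range
        else:
            initial.add(room_id)  # need all current account data for the room
    return live, previously, initial


# A room we have never sent down ends up in the "initial" bucket.
live, prev, initial = partition_rooms({"!a:x"}, {}, have_from_token=True)
assert initial == {"!a:x"}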
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 287f4b04ad..56e1d9329e 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -19,7 +19,6 @@ from typing import ( AbstractSet, ChainMap, Dict, - List, Mapping, MutableMapping, Optional, @@ -119,6 +118,8 @@ class SlidingSyncExtensionHandler: if sync_config.extensions.account_data is not None: account_data_response = await self.get_account_data_extension_response( sync_config=sync_config, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, actual_lists=actual_lists, actual_room_ids=actual_room_ids, account_data_request=sync_config.extensions.account_data, @@ -361,6 +362,8 @@ class SlidingSyncExtensionHandler: async def get_account_data_extension_response( self, sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + new_connection_state: "MutablePerConnectionState", actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], actual_room_ids: Set[str], account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, @@ -425,15 +428,7 @@ class SlidingSyncExtensionHandler: # Fetch room account data # - # List of -> Mapping from room_id to mapping of `type` to `content` of room - # account data events. - # - # This is is a list so we can avoid making copies of immutable data and instead - # just provide multiple maps that need to be combined. Normally, we could - # reach for `ChainMap` in this scenario, but this is a nested map and accessing - # the ChainMap by room_id won't combine the two maps for that room (we would - # need a new `NestedChainMap` type class). - account_data_by_room_maps: List[Mapping[str, Mapping[str, JsonMapping]]] = [] + account_data_by_room_map: MutableMapping[str, Mapping[str, JsonMapping]] = {} relevant_room_ids = self.find_relevant_room_ids_for_extension( requested_lists=account_data_request.lists, requested_room_ids=account_data_request.rooms, @@ -441,9 +436,43 @@ class SlidingSyncExtensionHandler: actual_room_ids=actual_room_ids, ) if len(relevant_room_ids) > 0: + # We need to handle the different cases depending on if we have sent + # down account data previously or not, so we split the relevant + # rooms up into different collections based on status. + live_rooms = set() + previously_rooms: Dict[str, int] = {} + initial_rooms = set() + + for room_id in relevant_room_ids: + if not from_token: + initial_rooms.add(room_id) + continue + + room_status = previous_connection_state.account_data.have_sent_room( + room_id + ) + if room_status.status == HaveSentRoomFlag.LIVE: + live_rooms.add(room_id) + elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: + assert room_status.last_token is not None + previously_rooms[room_id] = room_status.last_token + elif room_status.status == HaveSentRoomFlag.NEVER: + initial_rooms.add(room_id) + else: + assert_never(room_status.status) + + # We fetch all room account data since the from_token. This is so + # that we can record which rooms have updates that haven't been sent + # down. + # + # Mapping from room_id to mapping of `type` to `content` of room account + # data events. 
+ all_updates_since_the_from_token: Mapping[ + str, Mapping[str, JsonMapping] + ] = {} if from_token is not None: # TODO: This should take into account the `from_token` and `to_token` - account_data_by_room_map = ( + all_updates_since_the_from_token = ( await self.store.get_updated_room_account_data_for_user( user_id, from_token.stream_token.account_data_key ) @@ -456,58 +485,108 @@ class SlidingSyncExtensionHandler: user_id, from_token.stream_token.account_data_key ) for room_id, tags in tags_by_room.items(): - account_data_by_room_map.setdefault(room_id, {})[ + all_updates_since_the_from_token.setdefault(room_id, {})[ AccountDataTypes.TAG ] = {"tags": tags} - account_data_by_room_maps.append(account_data_by_room_map) - else: - # TODO: This should take into account the `to_token` - immutable_account_data_by_room_map = ( - await self.store.get_room_account_data_for_user(user_id) - ) - account_data_by_room_maps.append(immutable_account_data_by_room_map) + # For live rooms we just get the updates from `all_updates_since_the_from_token` + if live_rooms: + for room_id in all_updates_since_the_from_token.keys() & live_rooms: + account_data_by_room_map[room_id] = ( + all_updates_since_the_from_token[room_id] + ) - # Add room tags - # - # TODO: This should take into account the `to_token` - tags_by_room = await self.store.get_tags_for_user(user_id) - account_data_by_room_maps.append( - { - room_id: {AccountDataTypes.TAG: {"tags": tags}} - for room_id, tags in tags_by_room.items() - } + # For previously and initial rooms we query each room individually. + if previously_rooms or initial_rooms: + + async def handle_previously(room_id: str) -> None: + # Either get updates or all account data in the room + # depending on if the room state is PREVIOUSLY or NEVER. + previous_token = previously_rooms.get(room_id) + if previous_token is not None: + room_account_data = await ( + self.store.get_updated_room_account_data_for_user_for_room( + user_id=user_id, + room_id=room_id, + from_stream_id=previous_token, + to_stream_id=to_token.account_data_key, + ) + ) + + # Add room tags + changed = await self.store.has_tags_changed_for_room( + user_id=user_id, + room_id=room_id, + from_stream_id=previous_token, + to_stream_id=to_token.account_data_key, + ) + if changed: + # XXX: Ideally, this should take into account the `to_token` + # and return the set of tags at that time but we don't track + # changes to tags so we just have to return all tags for the + # room. + immutable_tag_map = await self.store.get_tags_for_room( + user_id, room_id + ) + room_account_data[AccountDataTypes.TAG] = { + "tags": immutable_tag_map + } + + # Only add an entry if there were any updates. + if room_account_data: + account_data_by_room_map[room_id] = room_account_data + else: + # TODO: This should take into account the `to_token` + immutable_room_account_data = ( + await self.store.get_account_data_for_room(user_id, room_id) + ) + + # Add room tags + # + # XXX: Ideally, this should take into account the `to_token` + # and return the set of tags at that time but we don't track + # changes to tags so we just have to return all tags for the + # room. 
+                        immutable_tag_map = await self.store.get_tags_for_room(
+                            user_id, room_id
+                        )
+
+                        account_data_by_room_map[room_id] = ChainMap(
+                            {AccountDataTypes.TAG: {"tags": immutable_tag_map}}
+                            if immutable_tag_map
+                            else {},
+                            # Cast is safe because `ChainMap` only mutates the top-most map,
+                            # see https://github.com/python/typeshed/issues/8430
+                            cast(
+                                MutableMapping[str, JsonMapping],
+                                immutable_room_account_data,
+                            ),
+                        )
+
+                # We handle these rooms concurrently to speed it up.
+                await concurrently_execute(
+                    handle_previously,
+                    previously_rooms.keys() | initial_rooms,
+                    limit=20,
                 )
 
-        # Filter down to the relevant rooms ... and combine the maps
-        relevant_account_data_by_room_map: MutableMapping[
-            str, Mapping[str, JsonMapping]
-        ] = {}
-        for room_id in relevant_room_ids:
-            # We want to avoid adding empty maps for relevant rooms that have no room
-            # account data so do a quick check to see if it's in any of the maps.
-            is_room_in_maps = False
-            for room_map in account_data_by_room_maps:
-                if room_id in room_map:
-                    is_room_in_maps = True
-                    break
+            # Now record which rooms are up to date, and which rooms have
+            # pending updates to send.
+            new_connection_state.account_data.record_sent_rooms(relevant_room_ids)
+            missing_updates = (
+                all_updates_since_the_from_token.keys() - relevant_room_ids
+            )
+            if missing_updates:
+                # If we have missing updates then we must have had a from_token.
+                assert from_token is not None
-            # If we found the room in any of the maps, combine the maps for that room
-            if is_room_in_maps:
-                relevant_account_data_by_room_map[room_id] = ChainMap(
-                    {},
-                    *(
-                        # Cast is safe because `ChainMap` only mutates the top-most map,
-                        # see https://github.com/python/typeshed/issues/8430
-                        cast(MutableMapping[str, JsonMapping], room_map[room_id])
-                        for room_map in account_data_by_room_maps
-                        if room_map.get(room_id)
-                    ),
+                new_connection_state.account_data.record_unsent_rooms(
+                    missing_updates, from_token.stream_token.account_data_key
                 )
 
         return SlidingSyncResult.Extensions.AccountDataExtension(
             global_account_data_map=global_account_data_map,
-            account_data_by_room_map=relevant_account_data_by_room_map,
+            account_data_by_room_map=account_data_by_room_map,
         )
 
     @trace
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index b30639b4e6..e583c182ba 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -467,6 +467,56 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
             get_updated_room_account_data_for_user_txn,
         )
 
+    async def get_updated_room_account_data_for_user_for_room(
+        self,
+        # Since there are multiple arguments with the same type, force keyword arguments
+        # so people don't accidentally swap the order
+        *,
+        user_id: str,
+        room_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> Dict[str, JsonMapping]:
+        """Get the room account_data that's changed for a user in a room.
+
+        (> `from_stream_id` and <= `to_stream_id`)
+
+        Args:
+            user_id: The user to get the account_data for.
+            room_id: The room to check
+            from_stream_id: The point in the stream to fetch from
+            to_stream_id: The point in the stream to fetch to
+
+        Returns:
+            A dict of the room account data.
+        """
+
+        def get_updated_room_account_data_for_user_for_room_txn(
+            txn: LoggingTransaction,
+        ) -> Dict[str, JsonMapping]:
+            sql = """
+                SELECT account_data_type, content FROM room_account_data
+                WHERE user_id = ? AND room_id = ? AND stream_id > ? AND stream_id <= ?
+ """ + txn.execute(sql, (user_id, room_id, from_stream_id, to_stream_id)) + + room_account_data: Dict[str, JsonMapping] = {} + for row in txn: + room_account_data[row[0]] = db_to_json(row[1]) + + return room_account_data + + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(from_stream_id) + ) + if not changed: + return {} + + return await self.db_pool.runInteraction( + "get_updated_room_account_data_for_user_for_room", + get_updated_room_account_data_for_user_for_room_txn, + ) + @cached(max_entries=5000, iterable=True) async def ignored_by(self, user_id: str) -> FrozenSet[str]: """ diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 83939d10b0..f2df37fec1 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -267,6 +267,15 @@ class SlidingSyncStore(SQLBaseStore): (have_sent_room.status.value, have_sent_room.last_token) ) + for ( + room_id, + have_sent_room, + ) in per_connection_state.account_data._statuses.items(): + key_values.append((connection_position, "account_data", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + self.db_pool.simple_upsert_many_txn( txn, table="sliding_sync_connection_streams", @@ -407,6 +416,7 @@ class SlidingSyncStore(SQLBaseStore): # Now look up the per-room stream data. rooms: Dict[str, HaveSentRoom[str]] = {} receipts: Dict[str, HaveSentRoom[str]] = {} + account_data: Dict[str, HaveSentRoom[str]] = {} receipt_rows = self.db_pool.simple_select_list_txn( txn, @@ -427,6 +437,8 @@ class SlidingSyncStore(SQLBaseStore): rooms[room_id] = have_sent_room elif stream == "receipts": receipts[room_id] = have_sent_room + elif stream == "account_data": + account_data[room_id] = have_sent_room else: # For forwards compatibility we ignore unknown streams, as in # future we want to be able to easily add more stream types. 
@@ -435,6 +447,7 @@
         return PerConnectionStateDB(
             rooms=RoomStatusMap(rooms),
             receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
             room_configs=room_configs,
         )
 
@@ -452,6 +465,7 @@ class PerConnectionStateDB:
 
     rooms: "RoomStatusMap[str]"
     receipts: "RoomStatusMap[str]"
+    account_data: "RoomStatusMap[str]"
 
     room_configs: Mapping[str, "RoomSyncConfig"]
 
@@ -484,10 +498,21 @@ class PerConnectionStateDB:
             for room_id, status in per_connection_state.receipts.get_updates().items()
         }
 
+        account_data = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    str(status.last_token) if status.last_token is not None else None
+                ),
+            )
+            for room_id, status in per_connection_state.account_data.get_updates().items()
+        }
+
         log_kv(
             {
                 "rooms": rooms,
                 "receipts": receipts,
+                "account_data": account_data,
                 "room_configs": per_connection_state.room_configs.maps[0],
             }
         )
@@ -495,6 +520,7 @@
         return PerConnectionStateDB(
             rooms=RoomStatusMap(rooms),
             receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
             room_configs=per_connection_state.room_configs.maps[0],
         )
 
@@ -524,8 +550,19 @@ class PerConnectionStateDB:
             for room_id, status in self.receipts._statuses.items()
         }
 
+        account_data = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    int(status.last_token) if status.last_token is not None else None
+                ),
+            )
+            for room_id, status in self.account_data._statuses.items()
+        }
+
         return PerConnectionState(
             rooms=RoomStatusMap(rooms),
             receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
             room_configs=self.room_configs,
         )
diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py
index b498cb9625..44f395f315 100644
--- a/synapse/storage/databases/main/tags.py
+++ b/synapse/storage/databases/main/tags.py
@@ -158,6 +158,52 @@ class TagsWorkerStore(AccountDataWorkerStore):
 
         return results
 
+    async def has_tags_changed_for_room(
+        self,
+        # Since there are multiple arguments with the same type, force keyword arguments
+        # so people don't accidentally swap the order
+        *,
+        user_id: str,
+        room_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> bool:
+        """Check if the user's tags for a room have been updated in the token range
+
+        (> `from_stream_id` and <= `to_stream_id`)
+
+        Args:
+            user_id: The user to get tags for
+            room_id: The room to get tags for
+            from_stream_id: The point in the stream to fetch from
+            to_stream_id: The point in the stream to fetch to
+
+        Returns:
+            True if the user's tags for the room changed in the token range,
+            otherwise False.
+ """ + + # Shortcut if no room has changed for the user + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(from_stream_id) + ) + if not changed: + return False + + last_change_position_for_room = await self.db_pool.simple_select_one_onecol( + table="room_tags_revisions", + keyvalues={"user_id": user_id, "room_id": room_id}, + retcol="stream_id", + allow_none=True, + ) + + if last_change_position_for_room is None: + return False + + return ( + last_change_position_for_room > from_stream_id + and last_change_position_for_room <= to_stream_id + ) + @cached(num_args=2, tree=True) async def get_tags_for_room( self, user_id: str, room_id: str diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 149920f883..5dd2c9d411 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -675,7 +675,7 @@ class HaveSentRoomFlag(Enum): LIVE = "live" -T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken) +T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken, int) @attr.s(auto_attribs=True, slots=True, frozen=True) @@ -823,6 +823,7 @@ class PerConnectionState: rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) + account_data: RoomStatusMap[int] = attr.Factory(RoomStatusMap) room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict) @@ -833,6 +834,7 @@ class PerConnectionState: return MutablePerConnectionState( rooms=self.rooms.get_mutable(), receipts=self.receipts.get_mutable(), + account_data=self.account_data.get_mutable(), room_configs=ChainMap({}, room_configs), ) @@ -840,6 +842,7 @@ class PerConnectionState: return PerConnectionState( rooms=self.rooms.copy(), receipts=self.receipts.copy(), + account_data=self.account_data.copy(), room_configs=dict(self.room_configs), ) @@ -853,6 +856,7 @@ class MutablePerConnectionState(PerConnectionState): rooms: MutableRoomStatusMap[RoomStreamToken] receipts: MutableRoomStatusMap[MultiWriterStreamToken] + account_data: MutableRoomStatusMap[int] room_configs: typing.ChainMap[str, RoomSyncConfig] @@ -860,6 +864,7 @@ class MutablePerConnectionState(PerConnectionState): return ( bool(self.rooms.get_updates()) or bool(self.receipts.get_updates()) + or bool(self.account_data.get_updates()) or bool(self.get_room_config_updates()) ) diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py index 03b2db39b9..799fbb1856 100644 --- a/tests/rest/client/sliding_sync/test_extension_account_data.py +++ b/tests/rest/client/sliding_sync/test_extension_account_data.py @@ -11,9 +11,11 @@ # See the GNU Affero General Public License for more details: # . 
# +import enum import logging -from parameterized import parameterized_class +from parameterized import parameterized, parameterized_class +from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -30,6 +32,11 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +class TagAction(enum.Enum): + ADD = enum.auto() + REMOVE = enum.auto() + + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the # foreground update for # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by @@ -350,10 +357,20 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): account_data_map[AccountDataTypes.TAG], {"tags": {"m.favourite": {}}} ) - def test_room_account_data_incremental_sync(self) -> None: + @parameterized.expand( + [ + ("add tags", TagAction.ADD), + ("remove tags", TagAction.REMOVE), + ] + ) + def test_room_account_data_incremental_sync( + self, test_description: str, tag_action: TagAction + ) -> None: """ On incremental sync, we return all account data for a given room but only for rooms that we request and are being returned in the Sliding Sync response. + + (HaveSentRoomFlag.LIVE) """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -432,23 +449,42 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) - # Add another room tag - self.get_success( - self.account_data_handler.add_tag_to_room( - user_id=user1_id, - room_id=room_id1, - tag="m.server_notice", - content={}, + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) ) - ) - self.get_success( - self.account_data_handler.add_tag_to_room( - user_id=user1_id, - room_id=room_id2, - tag="m.server_notice", - content={}, + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) ) - ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) # Make an incremental Sliding Sync request with the account_data extension enabled response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) @@ -475,10 +511,431 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): exact=True, ) self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) - self.assertEqual( - account_data_map[AccountDataTypes.TAG], - {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + if tag_action == TagAction.ADD: + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) + elif tag_action == TagAction.REMOVE: + # If we previously showed the client that the room has tags, when it no + # longer has tags, we need to show them an empty map. 
+ self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {}}, + ) + else: + assert_never(tag_action) + + @parameterized.expand( + [ + ("add tags", TagAction.ADD), + ("remove tags", TagAction.REMOVE), + ] + ) + def test_room_account_data_incremental_sync_out_of_range_never( + self, test_description: str, tag_action: TagAction + ) -> None: + """Tests that we don't return account data for rooms that are out of + range, but then do send all account data once they're in range. + + (initial/HaveSentRoomFlag.NEVER) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) + + # Now send a message into room1 so that it is at the top of the list + self.helper.send(room_id1, body="new event", tok=user1_tok) + + # Make a SS request for only the top room. + sync_body = { + "lists": { + "main": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "lists": ["main"], + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Only room1 should be in the response since it's the latest room with activity + # and our range only includes 1 room. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) + + # Move room2 into range. 
+        self.helper.send(room_id2, body="new event", tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
+        # We expect to see the account data of room2, as that has the most
+        # recent update.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+        # Since this is the first time we're seeing room2 down sync, we should see all
+        # room account data for it.
+        account_data_map = {
+            event["type"]: event["content"]
+            for event in response_body["extensions"]["account_data"]
+            .get("rooms")
+            .get(room_id2)
+        }
+        expected_account_data_keys = {
+            "org.matrix.roorarraz",
+            "org.matrix.roorarraz2",
+        }
+        if tag_action == TagAction.ADD:
+            expected_account_data_keys.add(AccountDataTypes.TAG)
+        self.assertIncludes(
+            account_data_map.keys(),
+            expected_account_data_keys,
+            exact=True,
+        )
+        self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"})
+        self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"})
+        if tag_action == TagAction.ADD:
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {"m.favourite": {}, "m.server_notice": {}}},
+            )
+        elif tag_action == TagAction.REMOVE:
+            # Since we never told the client about the room tags, we don't need to say
+            # anything if there are no tags now (the client doesn't need an update).
+            self.assertIsNone(
+                account_data_map.get(AccountDataTypes.TAG),
+                account_data_map,
+            )
+        else:
+            assert_never(tag_action)
+
+    @parameterized.expand(
+        [
+            ("add tags", TagAction.ADD),
+            ("remove tags", TagAction.REMOVE),
+        ]
+    )
+    def test_room_account_data_incremental_sync_out_of_range_previously(
+        self, test_description: str, tag_action: TagAction
+    ) -> None:
+        """Tests that we don't return account data for rooms that fall out of
+        range, but then do send all account data that has changed once they're
+        back in range.
+
+        (HaveSentRoomFlag.PREVIOUSLY)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room and add some room account data
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Create another room with some room account data
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Make an initial Sliding Sync request for only room1 and room2.
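+        # (room subscriptions are used here, rather than a list, so that each
+        # room's in-range status can be toggled directly by the later requests)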
+ sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + room_id2: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Both rooms show up because we have a room subscription for each and they're + # requested in the `account_data` extension. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1, room_id2}, + exact=True, + ) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) + + # Make an incremental Sliding Sync request for just room1 + response_body, from_token = self.do_sync( + { + **sync_body, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + }, + }, + since=from_token, + tok=user1_tok, + ) + + # Only room1 shows up because we only have a room subscription for room1 now. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + + # Make an incremental Sliding Sync request for just room2 now + response_body, from_token = self.do_sync( + { + **sync_body, + "room_subscriptions": { + room_id2: { + "required_state": [], + "timeline_limit": 0, + }, + }, + }, + since=from_token, + tok=user1_tok, + ) + + # Only room2 shows up because we only have a room subscription for room2 now. 
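+        # (room2 was previously sent down sync and then fell out of range, i.e.
+        # HaveSentRoomFlag.PREVIOUSLY, so only the account data that changed
+        # while it was out of range should be returned below)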
+ self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Check for room account data for room2 + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + # We should see any room account data updates for room2 since the last + # time we saw it down sync + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id2) + } + self.assertIncludes( + account_data_map.keys(), + {"org.matrix.roorarraz2", AccountDataTypes.TAG}, + exact=True, + ) + self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) + if tag_action == TagAction.ADD: + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) + elif tag_action == TagAction.REMOVE: + # If we previously showed the client that the room has tags, when it no + # longer has tags, we need to show them an empty map. + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {}}, + ) + else: + assert_never(tag_action) def test_wait_for_new_data(self) -> None: """ From 75e2c17d2a4ebe9e8841e1f7229f9d12ea1c3999 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 20 Sep 2024 08:12:56 +0100 Subject: [PATCH 264/332] Speed up sorting of sliding sync rooms in initial request (#17734) We do this by using the event stream cache. --------- Co-authored-by: Devon Hudson --- changelog.d/17734.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 71 ++++++++++++++++++++- 2 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17734.misc diff --git a/changelog.d/17734.misc b/changelog.d/17734.misc new file mode 100644 index 0000000000..0bb5533ab6 --- /dev/null +++ b/changelog.d/17734.misc @@ -0,0 +1 @@ +Minor speed up of initial sliding sync requests. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index bf19eb735b..0c9722021a 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -420,6 +420,10 @@ class SlidingSyncRoomLists: Dict[str, RoomsForUserType], filtered_sync_room_map ), to_token, + # We only need to sort the rooms up to the end + # of the largest range. Both sides of range are + # inclusive so we `+ 1`. + limit=max(range[1] + 1 for range in list_config.ranges), ) for range in list_config.ranges: @@ -462,7 +466,7 @@ class SlidingSyncRoomLists: ) lists[list_key] = SlidingSyncResult.SlidingWindowList( - count=len(sorted_room_info), + count=len(filtered_sync_room_map), ops=ops, ) @@ -1980,15 +1984,21 @@ class SlidingSyncRoomLists: self, sync_room_map: Dict[str, RoomsForUserType], to_token: StreamToken, + limit: Optional[int] = None, ) -> List[RoomsForUserType]: """ Sort by `stream_ordering` of the last event that the user should see in the room. `stream_ordering` is unique so we get a stable sort. + If `limit` is specified then sort may return fewer entries, but will + always return at least the top N rooms. This is useful as we don't always + need to sort the full list, but are just interested in the top N. + Args: sync_room_map: Dictionary of room IDs to sort along with membership information in the room at the time of `to_token`. 
to_token: We sort based on the events in the room at this token (<= `to_token`) + limit: The number of rooms that we need to return from the top of the list. Returns: A sorted list of room IDs by `stream_ordering` along with membership information. @@ -1998,8 +2008,23 @@ class SlidingSyncRoomLists: # user should see in the room (<= `to_token`) last_activity_in_room_map: Dict[str, int] = {} + # Same as above, except for positions that we know are in the event + # stream cache. + cached_positions: Dict[str, int] = {} + + earliest_cache_position = ( + self.store._events_stream_cache.get_earliest_known_position() + ) + for room_id, room_for_user in sync_room_map.items(): - if room_for_user.membership != Membership.JOIN: + if room_for_user.membership == Membership.JOIN: + # For joined rooms check the stream change cache. + cached_position = ( + self.store._events_stream_cache.get_max_pos_of_last_change(room_id) + ) + if cached_position is not None: + cached_positions[room_id] = cached_position + else: # If the user has left/been invited/knocked/been banned from a # room, they shouldn't see anything past that point. # @@ -2009,6 +2034,48 @@ class SlidingSyncRoomLists: # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + # If the stream position is in range of the stream change cache + # we can include it. + if room_for_user.event_pos.stream > earliest_cache_position: + cached_positions[room_id] = room_for_user.event_pos.stream + + # If we are only asked for the top N rooms, and we have enough from + # looking in the stream change cache, then we can return early. This + # is because the cache must include all entries above + # `.get_earliest_known_position()`. + if limit is not None and len(cached_positions) >= limit: + # ... but first we need to handle the case where the cached max + # position is greater than the to_token, in which case we do + # actually query the DB. This should happen rarely, so can do it in + # a loop. + for room_id, position in list(cached_positions.items()): + if position > to_token.room_key.stream: + result = await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, to_token.room_key + ) + if ( + result is not None + and result[1].stream > earliest_cache_position + ): + # We have a stream position in the cached range. + cached_positions[room_id] = result[1].stream + else: + # No position in the range, so we remove the entry. + cached_positions.pop(room_id) + + if limit is not None and len(cached_positions) >= limit: + return sorted( + ( + room + for room in sync_room_map.values() + if room.room_id in cached_positions + ), + # Sort by the last activity (stream_ordering) in the room + key=lambda room_info: cached_positions[room_info.room_id], + # We want descending order + reverse=True, + ) + # For fully-joined rooms, we find the latest activity at/before the # `to_token`. 
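+        # (If we get here, either no `limit` was given or the stream change
+        # cache couldn't vouch for enough rooms, so we fall back to per-room
+        # DB lookups for the remaining positions.)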
joined_room_positions = ( From 5173741c71b42f36ea7fc169f38c53466e660a01 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Mon, 23 Sep 2024 08:33:48 -0400 Subject: [PATCH 265/332] Support MSC4140: Delayed events (Futures) (#17326) --- changelog.d/17326.feature | 2 + .../conf/workers-shared-extra.yaml.j2 | 3 + .../configuration/config_documentation.md | 13 + docs/workers.md | 1 + scripts-dev/complement.sh | 1 + synapse/app/generic_worker.py | 2 + synapse/config/server.py | 13 +- synapse/handlers/delayed_events.py | 484 ++++++++++++++++ synapse/replication/http/__init__.py | 2 + synapse/replication/http/delayed_events.py | 48 ++ synapse/rest/__init__.py | 4 +- synapse/rest/client/delayed_events.py | 97 ++++ synapse/rest/client/room.py | 91 ++- synapse/rest/client/versions.py | 4 +- synapse/server.py | 8 +- synapse/storage/databases/main/__init__.py | 4 +- .../storage/databases/main/delayed_events.py | 523 ++++++++++++++++++ synapse/storage/schema/__init__.py | 8 +- .../main/delta/88/01_add_delayed_events.sql | 30 + tests/rest/client/test_delayed_events.py | 346 ++++++++++++ tests/rest/client/test_rooms.py | 100 ++++ 21 files changed, 1772 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17326.feature create mode 100644 synapse/handlers/delayed_events.py create mode 100644 synapse/replication/http/delayed_events.py create mode 100644 synapse/rest/client/delayed_events.py create mode 100644 synapse/storage/databases/main/delayed_events.py create mode 100644 synapse/storage/schema/main/delta/88/01_add_delayed_events.sql create mode 100644 tests/rest/client/test_delayed_events.py diff --git a/changelog.d/17326.feature b/changelog.d/17326.feature new file mode 100644 index 0000000000..348c54b040 --- /dev/null +++ b/changelog.d/17326.feature @@ -0,0 +1,2 @@ +Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). + diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 6588b3ce14..b9334cc53b 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -111,6 +111,9 @@ server_notices: system_mxid_avatar_url: "" room_name: "Server Alert" +# Enable delayed events (msc4140) +max_event_delay_duration: 24h + # Disable sync cache so that initial `/sync` requests are up-to-date. caches: diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 282b59dec9..f51924f064 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -761,6 +761,19 @@ email: password_reset: "[%(server_name)s] Password reset" email_validation: "[%(server_name)s] Validate your email" ``` +--- +### `max_event_delay_duration` + +The maximum allowed duration by which sent events can be delayed, as per +[MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). +Must be a positive value if set. + +Defaults to no duration (`null`), which disallows sending delayed events. + +Example configuration: +```yaml +max_event_delay_duration: 24h +``` ## Homeserver blocking Useful options for Synapse admins. diff --git a/docs/workers.md b/docs/workers.md index fbf539fa7e..51b22fef9b 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -290,6 +290,7 @@ information. 
Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ + ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events Pagination requests can also be handled, but all requests for a given room must be routed to the same instance. Additionally, care must be taken to diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 4ad547bc7e..8fef1ae022 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -223,6 +223,7 @@ test_packages=( ./tests/msc3930 ./tests/msc3902 ./tests/msc3967 + ./tests/msc4140 ) # Enable dirty runs, so tests will reuse the same container where possible. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 18d294f2b2..6a944998f1 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -65,6 +65,7 @@ from synapse.storage.databases.main.appservice import ( ) from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore +from synapse.storage.databases.main.delayed_events import DelayedEventsStore from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore from synapse.storage.databases.main.devices import DeviceWorkerStore from synapse.storage.databases.main.directory import DirectoryWorkerStore @@ -161,6 +162,7 @@ class GenericWorkerStore( TaskSchedulerWorkerStore, ExperimentalFeaturesStore, SlidingSyncStore, + DelayedEventsStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. diff --git a/synapse/config/server.py b/synapse/config/server.py index 488604a30c..6a8c7cb1c9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -780,6 +780,17 @@ class ServerConfig(Config): else: self.delete_stale_devices_after = None + # The maximum allowed delay duration for delayed events (MSC4140). 
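+        # (parsed with the standard duration format, e.g. "24h", or an integer
+        # number of milliseconds)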
+ max_event_delay_duration = config.get("max_event_delay_duration") + if max_event_delay_duration is not None: + self.max_event_delay_ms: Optional[int] = self.parse_duration( + max_event_delay_duration + ) + if self.max_event_delay_ms <= 0: + raise ConfigError("max_event_delay_duration must be a positive value") + else: + self.max_event_delay_ms = None + def has_tls_listener(self) -> bool: return any(listener.is_tls() for listener in self.listeners) diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py new file mode 100644 index 0000000000..9d59a09948 --- /dev/null +++ b/synapse/handlers/delayed_events.py @@ -0,0 +1,484 @@ +import logging +from typing import TYPE_CHECKING, List, Optional, Set, Tuple + +from twisted.internet.interfaces import IDelayedCall + +from synapse.api.constants import EventTypes +from synapse.api.errors import ShadowBanError +from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME +from synapse.logging.opentracing import set_tag +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.http.delayed_events import ( + ReplicationAddedDelayedEventRestServlet, +) +from synapse.storage.databases.main.delayed_events import ( + DelayedEventDetails, + DelayID, + EventType, + StateKey, + Timestamp, + UserLocalpart, +) +from synapse.storage.databases.main.state_deltas import StateDelta +from synapse.types import ( + JsonDict, + Requester, + RoomID, + UserID, + create_requester, +) +from synapse.util.events import generate_fake_event_id +from synapse.util.metrics import Measure + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class DelayedEventsHandler: + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + self._config = hs.config + self._clock = hs.get_clock() + self._request_ratelimiter = hs.get_request_ratelimiter() + self._event_creation_handler = hs.get_event_creation_handler() + self._room_member_handler = hs.get_room_member_handler() + + self._next_delayed_event_call: Optional[IDelayedCall] = None + + # The current position in the current_state_delta stream + self._event_pos: Optional[int] = None + + # Guard to ensure we only process event deltas one at a time + self._event_processing = False + + if hs.config.worker.worker_app is None: + self._repl_client = None + + async def _schedule_db_events() -> None: + # We kick this off to pick up outstanding work from before the last restart. + # Block until we're up to date. + await self._unsafe_process_new_event() + hs.get_notifier().add_replication_callback(self.notify_new_event) + # Kick off again (without blocking) to catch any missed notifications + # that may have fired before the callback was added. + self._clock.call_later(0, self.notify_new_event) + + # Delayed events that are already marked as processed on startup might not have been + # sent properly on the last run of the server, so unmark them to send them again. + # Caveat: this will double-send delayed events that successfully persisted, but failed + # to be removed from the DB table of delayed events. + # TODO: To avoid double-sending, scan the timeline to find which of these events were + # already sent. To do so, must store delay_ids in sent events to retrieve them later. 
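+                # In other words, delivery is at-least-once across restarts,
+                # not exactly-once.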
+                await self._store.unprocess_delayed_events()
+
+                events, next_send_ts = await self._store.process_timeout_delayed_events(
+                    self._get_current_ts()
+                )
+
+                if next_send_ts:
+                    self._schedule_next_at(next_send_ts)
+
+                # Can send the events in background after having awaited on marking them as processed
+                run_as_background_process(
+                    "_send_events",
+                    self._send_events,
+                    events,
+                )
+
+            self._initialized_from_db = run_as_background_process(
+                "_schedule_db_events", _schedule_db_events
+            )
+        else:
+            self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)
+
+    @property
+    def _is_master(self) -> bool:
+        return self._repl_client is None
+
+    def notify_new_event(self) -> None:
+        """
+        Called when there may be more state event deltas to process,
+        which should cancel pending delayed events for the same state.
+        """
+        if self._event_processing:
+            return
+
+        self._event_processing = True
+
+        async def process() -> None:
+            try:
+                await self._unsafe_process_new_event()
+            finally:
+                self._event_processing = False
+
+        run_as_background_process("delayed_events.notify_new_event", process)
+
+    async def _unsafe_process_new_event(self) -> None:
+        # If self._event_pos is None, it means we haven't fetched it from the DB yet
+        if self._event_pos is None:
+            self._event_pos = await self._store.get_delayed_events_stream_pos()
+            room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+            if self._event_pos > room_max_stream_ordering:
+                # apparently, we've processed more events than exist in the database!
+                # this can happen if events are removed with history purge or similar.
+                logger.warning(
+                    "Event stream ordering appears to have gone backwards (%i -> %i): "
+                    "rewinding delayed events processor",
+                    self._event_pos,
+                    room_max_stream_ordering,
+                )
+                self._event_pos = room_max_stream_ordering
+
+        # Loop round handling deltas until we're up to date
+        while True:
+            with Measure(self._clock, "delayed_events_delta"):
+                room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+                if self._event_pos == room_max_stream_ordering:
+                    return
+
+                logger.debug(
+                    "Processing delayed events %s->%s",
+                    self._event_pos,
+                    room_max_stream_ordering,
+                )
+                (
+                    max_pos,
+                    deltas,
+                ) = await self._storage_controllers.state.get_current_state_deltas(
+                    self._event_pos, room_max_stream_ordering
+                )
+
+                logger.debug(
+                    "Handling %d state deltas for delayed events processing",
+                    len(deltas),
+                )
+                await self._handle_state_deltas(deltas)
+
+                self._event_pos = max_pos
+
+                # Expose current event processing position to prometheus
+                event_processing_positions.labels("delayed_events").set(max_pos)
+
+                await self._store.update_delayed_events_stream_pos(max_pos)
+
+    async def _handle_state_deltas(self, deltas: List[StateDelta]) -> None:
+        """
+        Process current state deltas to cancel pending delayed events
+        that target the same state.
+        """
+        for delta in deltas:
+            logger.debug(
+                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            )
+
+            next_send_ts = await self._store.cancel_delayed_state_events(
+                room_id=delta.room_id,
+                event_type=delta.event_type,
+                state_key=delta.state_key,
+            )
+
+            if self._next_send_ts_changed(next_send_ts):
+                self._schedule_next_at_or_none(next_send_ts)
+
+    async def add(
+        self,
+        requester: Requester,
+        *,
+        room_id: str,
+        event_type: str,
+        state_key: Optional[str],
+        origin_server_ts: Optional[int],
+        content: JsonDict,
+        delay: int,
+    ) -> str:
+        """
+        Creates a new delayed event and schedules its delivery.
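+        When called on a worker, the event is persisted here and the main
+        process is notified over replication so that it can schedule the send.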
+ + Args: + requester: The requester of the delayed event, who will be its owner. + room_id: The ID of the room where the event should be sent to. + event_type: The type of event to be sent. + state_key: The state key of the event to be sent, or None if it is not a state event. + origin_server_ts: The custom timestamp to send the event with. + If None, the timestamp will be the actual time when the event is sent. + content: The content of the event to be sent. + delay: How long (in milliseconds) to wait before automatically sending the event. + + Returns: The ID of the added delayed event. + + Raises: + SynapseError: if the delayed event fails validation checks. + """ + await self._request_ratelimiter.ratelimit(requester) + + self._event_creation_handler.validator.validate_builder( + self._event_creation_handler.event_builder_factory.for_room_version( + await self._store.get_room_version(room_id), + { + "type": event_type, + "content": content, + "room_id": room_id, + "sender": str(requester.user), + **({"state_key": state_key} if state_key is not None else {}), + }, + ) + ) + + creation_ts = self._get_current_ts() + + delay_id, next_send_ts = await self._store.add_delayed_event( + user_localpart=requester.user.localpart, + device_id=requester.device_id, + creation_ts=creation_ts, + room_id=room_id, + event_type=event_type, + state_key=state_key, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + if self._repl_client is not None: + # NOTE: If this throws, the delayed event will remain in the DB and + # will be picked up once the main worker gets another delayed event. + await self._repl_client( + instance_name=MAIN_PROCESS_INSTANCE_NAME, + next_send_ts=next_send_ts, + ) + elif self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + return delay_id + + def on_added(self, next_send_ts: int) -> None: + next_send_ts = Timestamp(next_send_ts) + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + async def cancel(self, requester: Requester, delay_id: str) -> None: + """ + Cancels the scheduled delivery of the matching delayed event. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. + """ + assert self._is_master + await self._request_ratelimiter.ratelimit(requester) + await self._initialized_from_db + + next_send_ts = await self._store.cancel_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at_or_none(next_send_ts) + + async def restart(self, requester: Requester, delay_id: str) -> None: + """ + Restarts the scheduled delivery of the matching delayed event. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. 
+ """ + assert self._is_master + await self._request_ratelimiter.ratelimit(requester) + await self._initialized_from_db + + next_send_ts = await self._store.restart_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + current_ts=self._get_current_ts(), + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + async def send(self, requester: Requester, delay_id: str) -> None: + """ + Immediately sends the matching delayed event, instead of waiting for its scheduled delivery. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. + """ + assert self._is_master + await self._request_ratelimiter.ratelimit(requester) + await self._initialized_from_db + + event, next_send_ts = await self._store.process_target_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at_or_none(next_send_ts) + + await self._send_event( + DelayedEventDetails( + delay_id=DelayID(delay_id), + user_localpart=UserLocalpart(requester.user.localpart), + room_id=event.room_id, + type=event.type, + state_key=event.state_key, + origin_server_ts=event.origin_server_ts, + content=event.content, + device_id=event.device_id, + ) + ) + + async def _send_on_timeout(self) -> None: + self._next_delayed_event_call = None + + events, next_send_ts = await self._store.process_timeout_delayed_events( + self._get_current_ts() + ) + + if next_send_ts: + self._schedule_next_at(next_send_ts) + + await self._send_events(events) + + async def _send_events(self, events: List[DelayedEventDetails]) -> None: + sent_state: Set[Tuple[RoomID, EventType, StateKey]] = set() + for event in events: + if event.state_key is not None: + state_info = (event.room_id, event.type, event.state_key) + if state_info in sent_state: + continue + else: + state_info = None + try: + # TODO: send in background if message event or non-conflicting state event + await self._send_event(event) + if state_info is not None: + sent_state.add(state_info) + except Exception: + logger.exception("Failed to send delayed event") + + for room_id, event_type, state_key in sent_state: + await self._store.delete_processed_delayed_state_events( + room_id=str(room_id), + event_type=event_type, + state_key=state_key, + ) + + def _schedule_next_at_or_none(self, next_send_ts: Optional[Timestamp]) -> None: + if next_send_ts is not None: + self._schedule_next_at(next_send_ts) + elif self._next_delayed_event_call is not None: + self._next_delayed_event_call.cancel() + self._next_delayed_event_call = None + + def _schedule_next_at(self, next_send_ts: Timestamp) -> None: + delay = next_send_ts - self._get_current_ts() + delay_sec = delay / 1000 if delay > 0 else 0 + + if self._next_delayed_event_call is None: + self._next_delayed_event_call = self._clock.call_later( + delay_sec, + run_as_background_process, + "_send_on_timeout", + self._send_on_timeout, + ) + else: + self._next_delayed_event_call.reset(delay_sec) + + async def get_all_for_user(self, requester: Requester) -> List[JsonDict]: + """Return all pending delayed events requested by the given user.""" + await self._request_ratelimiter.ratelimit(requester) + return await self._store.get_all_delayed_events_for_user( + requester.user.localpart + ) + + async def _send_event( + self, + event: DelayedEventDetails, + txn_id: Optional[str] = None, + ) -> 
None:
+        user_id = UserID(event.user_localpart, self._config.server.server_name)
+        user_id_str = user_id.to_string()
+        # Create a new requester from what data is currently available
+        requester = create_requester(
+            user_id,
+            is_guest=await self._store.is_guest(user_id_str),
+            device_id=event.device_id,
+        )
+
+        try:
+            if event.state_key is not None and event.type == EventTypes.Member:
+                membership = event.content.get("membership")
+                assert membership is not None
+                event_id, _ = await self._room_member_handler.update_membership(
+                    requester,
+                    target=UserID.from_string(event.state_key),
+                    room_id=event.room_id.to_string(),
+                    action=membership,
+                    content=event.content,
+                    origin_server_ts=event.origin_server_ts,
+                )
+            else:
+                event_dict: JsonDict = {
+                    "type": event.type,
+                    "content": event.content,
+                    "room_id": event.room_id.to_string(),
+                    "sender": user_id_str,
+                }
+
+                if event.origin_server_ts is not None:
+                    event_dict["origin_server_ts"] = event.origin_server_ts
+
+                if event.state_key is not None:
+                    event_dict["state_key"] = event.state_key
+
+                (
+                    sent_event,
+                    _,
+                ) = await self._event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    event_dict,
+                    txn_id=txn_id,
+                )
+                event_id = sent_event.event_id
+        except ShadowBanError:
+            event_id = generate_fake_event_id()
+        finally:
+            # TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure
+            try:
+                await self._store.delete_processed_delayed_event(
+                    event.delay_id, event.user_localpart
+                )
+            except Exception:
+                logger.exception("Failed to delete processed delayed event")
+
+        set_tag("event_id", event_id)
+
+    def _get_current_ts(self) -> Timestamp:
+        return Timestamp(self._clock.time_msec())
+
+    def _next_send_ts_changed(self, next_send_ts: Optional[Timestamp]) -> bool:
+        # The DB alone knows if the next send time changed after adding/modifying
+        # a delayed event, but if we were to ever miss updating our delayed call's
+        # firing time, we may miss other updates. So, keep track of changes to
+        # the next send time here instead of in the DB.
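+        # (IDelayedCall.getTime() returns seconds since the epoch as a float,
+        # hence the multiplication by 1000 to compare millisecond timestamps)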
+ cached_next_send_ts = ( + int(self._next_delayed_event_call.getTime() * 1000) + if self._next_delayed_event_call is not None + else None + ) + return next_send_ts != cached_next_send_ts diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index c9cf838255..1673bd057e 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -23,6 +23,7 @@ from typing import TYPE_CHECKING from synapse.http.server import JsonResource from synapse.replication.http import ( account_data, + delayed_events, devices, federation, login, @@ -64,3 +65,4 @@ class ReplicationRestResource(JsonResource): login.register_servlets(hs, self) register.register_servlets(hs, self) devices.register_servlets(hs, self) + delayed_events.register_servlets(hs, self) diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py new file mode 100644 index 0000000000..77dabb08e6 --- /dev/null +++ b/synapse/replication/http/delayed_events.py @@ -0,0 +1,48 @@ +import logging +from typing import TYPE_CHECKING, Dict, Optional, Tuple + +from twisted.web.server import Request + +from synapse.http.server import HttpServer +from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict, JsonMapping + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class ReplicationAddedDelayedEventRestServlet(ReplicationEndpoint): + """Handle a delayed event being added by another worker. + + Request format: + + POST /_synapse/replication/delayed_event_added/ + + {} + """ + + NAME = "added_delayed_event" + PATH_ARGS = () + CACHE = False + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self.handler = hs.get_delayed_events_handler() + + @staticmethod + async def _serialize_payload(next_send_ts: int) -> JsonDict: # type: ignore[override] + return {"next_send_ts": next_send_ts} + + async def _handle_request( # type: ignore[override] + self, request: Request, content: JsonDict + ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]: + self.handler.on_added(int(content["next_send_ts"])) + + return 200, {} + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + ReplicationAddedDelayedEventRestServlet(hs).register(http_server) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index c5cdc36955..4e594e6595 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -31,6 +31,7 @@ from synapse.rest.client import ( auth, auth_issuer, capabilities, + delayed_events, devices, directory, events, @@ -81,6 +82,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] 
= ( room.register_deprecated_servlets, events.register_servlets, room.register_servlets, + delayed_events.register_servlets, login.register_servlets, profile.register_servlets, presence.register_servlets, diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py new file mode 100644 index 0000000000..eae5c9d226 --- /dev/null +++ b/synapse/rest/client/delayed_events.py @@ -0,0 +1,97 @@ +# This module contains REST servlets to do with delayed events: /delayed_events/ + +import logging +from enum import Enum +from http import HTTPStatus +from typing import TYPE_CHECKING, Tuple + +from synapse.api.errors import Codes, SynapseError +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest +from synapse.rest.client._base import client_patterns +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class _UpdateDelayedEventAction(Enum): + CANCEL = "cancel" + RESTART = "restart" + SEND = "send" + + +class UpdateDelayedEventServlet(RestServlet): + PATTERNS = client_patterns( + r"/org\.matrix\.msc4140/delayed_events/(?P[^/]+)$", + releases=(), + ) + CATEGORY = "Delayed event management requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.delayed_events_handler = hs.get_delayed_events_handler() + + async def on_POST( + self, request: SynapseRequest, delay_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + body = parse_json_object_from_request(request) + try: + action = str(body["action"]) + except KeyError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "'action' is missing", + Codes.MISSING_PARAM, + ) + try: + enum_action = _UpdateDelayedEventAction(action) + except ValueError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "'action' is not one of " + + ", ".join(f"'{m.value}'" for m in _UpdateDelayedEventAction), + Codes.INVALID_PARAM, + ) + + if enum_action == _UpdateDelayedEventAction.CANCEL: + await self.delayed_events_handler.cancel(requester, delay_id) + elif enum_action == _UpdateDelayedEventAction.RESTART: + await self.delayed_events_handler.restart(requester, delay_id) + elif enum_action == _UpdateDelayedEventAction.SEND: + await self.delayed_events_handler.send(requester, delay_id) + return 200, {} + + +class DelayedEventsServlet(RestServlet): + PATTERNS = client_patterns( + r"/org\.matrix\.msc4140/delayed_events$", + releases=(), + ) + CATEGORY = "Delayed event management requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.delayed_events_handler = hs.get_delayed_events_handler() + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + # TODO: Support Pagination stream API ("from" query parameter) + delayed_events = await self.delayed_events_handler.get_all_for_user(requester) + + ret = {"delayed_events": delayed_events} + return 200, ret + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + # The following can't currently be instantiated on workers. 
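+    # (the cancel/restart/send paths in DelayedEventsHandler assert that they
+    # run on the main process)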
+ if hs.config.worker.worker_app is None: + UpdateDelayedEventServlet(hs).register(http_server) + DelayedEventsServlet(hs).register(http_server) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 23c909ab14..8883cd6bc0 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -195,7 +195,9 @@ class RoomStateEventRestServlet(RestServlet): self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() + self.delayed_events_handler = hs.get_delayed_events_handler() self.auth = hs.get_auth() + self._max_event_delay_ms = hs.config.server.max_event_delay_ms def register(self, http_server: HttpServer) -> None: # /rooms/$roomid/state/$eventtype @@ -291,6 +293,22 @@ class RoomStateEventRestServlet(RestServlet): if requester.app_service: origin_server_ts = parse_integer(request, "ts") + delay = _parse_request_delay(request, self._max_event_delay_ms) + if delay is not None: + delay_id = await self.delayed_events_handler.add( + requester, + room_id=room_id, + event_type=event_type, + state_key=state_key, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + set_tag("delay_id", delay_id) + ret = {"delay_id": delay_id} + return 200, ret + try: if event_type == EventTypes.Member: membership = content.get("membership", None) @@ -341,7 +359,9 @@ class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() + self.delayed_events_handler = hs.get_delayed_events_handler() self.auth = hs.get_auth() + self._max_event_delay_ms = hs.config.server.max_event_delay_ms def register(self, http_server: HttpServer) -> None: # /rooms/$roomid/send/$event_type[/$txn_id] @@ -358,6 +378,26 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) + origin_server_ts = None + if requester.app_service: + origin_server_ts = parse_integer(request, "ts") + + delay = _parse_request_delay(request, self._max_event_delay_ms) + if delay is not None: + delay_id = await self.delayed_events_handler.add( + requester, + room_id=room_id, + event_type=event_type, + state_key=None, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + set_tag("delay_id", delay_id) + ret = {"delay_id": delay_id} + return 200, ret + event_dict: JsonDict = { "type": event_type, "content": content, @@ -365,10 +405,8 @@ class RoomSendEventRestServlet(TransactionRestServlet): "sender": requester.user.to_string(), } - if requester.app_service: - origin_server_ts = parse_integer(request, "ts") - if origin_server_ts is not None: - event_dict["origin_server_ts"] = origin_server_ts + if origin_server_ts is not None: + event_dict["origin_server_ts"] = origin_server_ts try: ( @@ -411,6 +449,49 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) +def _parse_request_delay( + request: SynapseRequest, + max_delay: Optional[int], +) -> Optional[int]: + """Parses from the request string the delay parameter for + delayed event requests, 
and checks it for correctness. + + Args: + request: the twisted HTTP request. + max_delay: the maximum allowed value of the delay parameter, + or None if no delay parameter is allowed. + Returns: + The value of the requested delay, or None if it was absent. + + Raises: + SynapseError: if the delay parameter is present and forbidden, + or if it exceeds the maximum allowed value. + """ + delay = parse_integer(request, "org.matrix.msc4140.delay") + if delay is None: + return None + if max_delay is None: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Delayed events are not supported on this server", + Codes.UNKNOWN, + { + "org.matrix.msc4140.errcode": "M_MAX_DELAY_UNSUPPORTED", + }, + ) + if delay > max_delay: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The requested delay exceeds the allowed maximum.", + Codes.UNKNOWN, + { + "org.matrix.msc4140.errcode": "M_MAX_DELAY_EXCEEDED", + "org.matrix.msc4140.max_delay": max_delay, + }, + ) + return delay + + # TODO: Needs unit testing for room ID + alias joins class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): CATEGORY = "Event sending requests" diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 874869dc2d..1cfac87606 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -4,7 +4,7 @@ # Copyright 2019 The Matrix.org Foundation C.I.C. # Copyright 2017 Vector Creations Ltd # Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -171,6 +171,8 @@ class VersionsRestServlet(RestServlet): is not None ) ), + # MSC4140: Delayed events + "org.matrix.msc4140": True, # MSC4151: Report room API (Client-Server API) "org.matrix.msc4151": self.config.experimental.msc4151_enabled, # Simplified sliding sync diff --git a/synapse/server.py b/synapse/server.py index d6c9cbdac0..318c6abf3d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2021 The Matrix.org Foundation C.I.C. 
-# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -68,6 +68,7 @@ from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, PasswordAuthProvider from synapse.handlers.cas import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler +from synapse.handlers.delayed_events import DelayedEventsHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.directory import DirectoryHandler @@ -251,6 +252,7 @@ class HomeServer(metaclass=abc.ABCMeta): "account_validity", "auth", "deactivate_account", + "delayed_events", "message", "pagination", "profile", @@ -964,3 +966,7 @@ class HomeServer(metaclass=abc.ABCMeta): register_threadpool("media", media_threadpool) return media_threadpool + + @cache_in_self + def get_delayed_events_handler(self) -> DelayedEventsHandler: + return DelayedEventsHandler(self) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 9a43ab63e8..86431f6e40 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -3,7 +3,7 @@ # # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -44,6 +44,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt from .cache import CacheInvalidationWorkerStore from .censor_events import CensorEventsStore from .client_ips import ClientIpWorkerStore +from .delayed_events import DelayedEventsStore from .deviceinbox import DeviceInboxStore from .devices import DeviceStore from .directory import DirectoryStore @@ -158,6 +159,7 @@ class DataStore( SessionStore, TaskSchedulerWorkerStore, SlidingSyncStore, + DelayedEventsStore, ): def __init__( self, diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py new file mode 100644 index 0000000000..1a7781713f --- /dev/null +++ b/synapse/storage/databases/main/delayed_events.py @@ -0,0 +1,523 @@ +import logging +from typing import List, NewType, Optional, Tuple + +import attr + +from synapse.api.errors import NotFoundError +from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import LoggingTransaction, StoreError +from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict, RoomID +from synapse.util import json_encoder, stringutils as stringutils + +logger = logging.getLogger(__name__) + + +DelayID = NewType("DelayID", str) +UserLocalpart = NewType("UserLocalpart", str) +DeviceID = NewType("DeviceID", str) +EventType = NewType("EventType", str) +StateKey = NewType("StateKey", str) + +Delay = NewType("Delay", int) +Timestamp = NewType("Timestamp", int) + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventDetails: + room_id: RoomID + type: EventType + state_key: Optional[StateKey] + origin_server_ts: Optional[Timestamp] + content: JsonDict + device_id: Optional[DeviceID] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class 
DelayedEventDetails(EventDetails): + delay_id: DelayID + user_localpart: UserLocalpart + + +class DelayedEventsStore(SQLBaseStore): + async def get_delayed_events_stream_pos(self) -> int: + """ + Gets the stream position of the background process to watch for state events + that target the same piece of state as any pending delayed events. + """ + return await self.db_pool.simple_select_one_onecol( + table="delayed_events_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="get_delayed_events_stream_pos", + ) + + async def update_delayed_events_stream_pos(self, stream_id: Optional[int]) -> None: + """ + Updates the stream position of the background process to watch for state events + that target the same piece of state as any pending delayed events. + + Must only be used by the worker running the background process. + """ + await self.db_pool.simple_update_one( + table="delayed_events_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="update_delayed_events_stream_pos", + ) + + async def add_delayed_event( + self, + *, + user_localpart: str, + device_id: Optional[str], + creation_ts: Timestamp, + room_id: str, + event_type: str, + state_key: Optional[str], + origin_server_ts: Optional[int], + content: JsonDict, + delay: int, + ) -> Tuple[DelayID, Timestamp]: + """ + Inserts a new delayed event in the DB. + + Returns: The generated ID assigned to the added delayed event, + and the send time of the next delayed event to be sent, + which is either the event just added or one added earlier. + """ + delay_id = _generate_delay_id() + send_ts = Timestamp(creation_ts + delay) + + def add_delayed_event_txn(txn: LoggingTransaction) -> Timestamp: + self.db_pool.simple_insert_txn( + txn, + table="delayed_events", + values={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "device_id": device_id, + "delay": delay, + "send_ts": send_ts, + "room_id": room_id, + "event_type": event_type, + "state_key": state_key, + "origin_server_ts": origin_server_ts, + "content": json_encoder.encode(content), + }, + ) + + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + assert next_send_ts is not None + return next_send_ts + + next_send_ts = await self.db_pool.runInteraction( + "add_delayed_event", add_delayed_event_txn + ) + + return delay_id, next_send_ts + + async def restart_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + current_ts: Timestamp, + ) -> Timestamp: + """ + Restarts the send time of the matching delayed event, + as long as it hasn't already been marked for processing. + + Args: + delay_id: The ID of the delayed event to restart. + user_localpart: The localpart of the delayed event's owner. + current_ts: The current time, which will be used to calculate the new send time. + + Returns: The send time of the next delayed event to be sent, + which is either the event just restarted, or another one + with an earlier send time than the restarted one's new send time. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def restart_delayed_event_txn( + txn: LoggingTransaction, + ) -> Timestamp: + txn.execute( + """ + UPDATE delayed_events + SET send_ts = ? + delay + WHERE delay_id = ? AND user_localpart = ? 
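+                    -- (skip events that were already claimed for sending)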
+ AND NOT is_processed + """, + ( + current_ts, + delay_id, + user_localpart, + ), + ) + if txn.rowcount == 0: + raise NotFoundError("Delayed event not found") + + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + assert next_send_ts is not None + return next_send_ts + + return await self.db_pool.runInteraction( + "restart_delayed_event", restart_delayed_event_txn + ) + + async def get_all_delayed_events_for_user( + self, + user_localpart: str, + ) -> List[JsonDict]: + """Returns all pending delayed events owned by the given user.""" + # TODO: Support Pagination stream API ("next_batch" field) + rows = await self.db_pool.execute( + "get_all_delayed_events_for_user", + """ + SELECT + delay_id, + room_id, + event_type, + state_key, + delay, + send_ts, + content + FROM delayed_events + WHERE user_localpart = ? AND NOT is_processed + ORDER BY send_ts + """, + user_localpart, + ) + return [ + { + "delay_id": DelayID(row[0]), + "room_id": str(RoomID.from_string(row[1])), + "type": EventType(row[2]), + **({"state_key": StateKey(row[3])} if row[3] is not None else {}), + "delay": Delay(row[4]), + "running_since": Timestamp(row[5] - row[4]), + "content": db_to_json(row[6]), + } + for row in rows + ] + + async def process_timeout_delayed_events( + self, current_ts: Timestamp + ) -> Tuple[ + List[DelayedEventDetails], + Optional[Timestamp], + ]: + """ + Marks for processing all delayed events that should have been sent prior to the provided time + that haven't already been marked as such. + + Returns: The details of all newly-processed delayed events, + and the send time of the next delayed event to be sent, if any. + """ + + def process_timeout_delayed_events_txn( + txn: LoggingTransaction, + ) -> Tuple[ + List[DelayedEventDetails], + Optional[Timestamp], + ]: + sql_cols = ", ".join( + ( + "delay_id", + "user_localpart", + "room_id", + "event_type", + "state_key", + "origin_server_ts", + "send_ts", + "content", + "device_id", + ) + ) + sql_update = "UPDATE delayed_events SET is_processed = TRUE" + sql_where = "WHERE send_ts <= ? 
AND NOT is_processed" + sql_args = (current_ts,) + sql_order = "ORDER BY send_ts" + if isinstance(self.database_engine, PostgresEngine): + # Do this only in Postgres because: + # - SQLite's RETURNING emits rows in an arbitrary order + # - https://www.sqlite.org/lang_returning.html#limitations_and_caveats + # - SQLite does not support data-modifying statements in a WITH clause + # - https://www.sqlite.org/lang_with.html + # - https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-MODIFYING + txn.execute( + f""" + WITH events_to_send AS ( + {sql_update} {sql_where} RETURNING * + ) SELECT {sql_cols} FROM events_to_send {sql_order} + """, + sql_args, + ) + rows = txn.fetchall() + else: + txn.execute( + f"SELECT {sql_cols} FROM delayed_events {sql_where} {sql_order}", + sql_args, + ) + rows = txn.fetchall() + txn.execute(f"{sql_update} {sql_where}", sql_args) + assert txn.rowcount == len(rows) + + events = [ + DelayedEventDetails( + RoomID.from_string(row[2]), + EventType(row[3]), + StateKey(row[4]) if row[4] is not None else None, + # If no custom_origin_ts is set, use send_ts as the event's timestamp + Timestamp(row[5] if row[5] is not None else row[6]), + db_to_json(row[7]), + DeviceID(row[8]) if row[8] is not None else None, + DelayID(row[0]), + UserLocalpart(row[1]), + ) + for row in rows + ] + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + return events, next_send_ts + + return await self.db_pool.runInteraction( + "process_timeout_delayed_events", process_timeout_delayed_events_txn + ) + + async def process_target_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + ) -> Tuple[ + EventDetails, + Optional[Timestamp], + ]: + """ + Marks for processing the matching delayed event, regardless of its timeout time, + as long as it has not already been marked as such. + + Args: + delay_id: The ID of the delayed event to process. + user_localpart: The localpart of the delayed event's owner. + + Returns: The details of the matching delayed event, + and the send time of the next delayed event to be sent, if any. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def process_target_delayed_event_txn( + txn: LoggingTransaction, + ) -> Tuple[ + EventDetails, + Optional[Timestamp], + ]: + sql_cols = ", ".join( + ( + "room_id", + "event_type", + "state_key", + "origin_server_ts", + "content", + "device_id", + ) + ) + sql_update = "UPDATE delayed_events SET is_processed = TRUE" + sql_where = "WHERE delay_id = ? AND user_localpart = ?
AND NOT is_processed" + sql_args = (delay_id, user_localpart) + txn.execute( + ( + f"{sql_update} {sql_where} RETURNING {sql_cols}" + if self.database_engine.supports_returning + else f"SELECT {sql_cols} FROM delayed_events {sql_where}" + ), + sql_args, + ) + row = txn.fetchone() + if row is None: + raise NotFoundError("Delayed event not found") + elif not self.database_engine.supports_returning: + txn.execute(f"{sql_update} {sql_where}", sql_args) + assert txn.rowcount == 1 + + event = EventDetails( + RoomID.from_string(row[0]), + EventType(row[1]), + StateKey(row[2]) if row[2] is not None else None, + Timestamp(row[3]) if row[3] is not None else None, + db_to_json(row[4]), + DeviceID(row[5]) if row[5] is not None else None, + ) + + return event, self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "process_target_delayed_event", process_target_delayed_event_txn + ) + + async def cancel_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + ) -> Optional[Timestamp]: + """ + Cancels the matching delayed event, i.e. removes it, as long as it hasn't been processed. + + Args: + delay_id: The ID of the delayed event to cancel. + user_localpart: The localpart of the delayed event's owner. + + Returns: The send time of the next delayed event to be sent, if any. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def cancel_delayed_event_txn( + txn: LoggingTransaction, + ) -> Optional[Timestamp]: + try: + self.db_pool.simple_delete_one_txn( + txn, + table="delayed_events", + keyvalues={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "is_processed": False, + }, + ) + except StoreError: + if txn.rowcount == 0: + raise NotFoundError("Delayed event not found") + else: + raise + + return self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "cancel_delayed_event", cancel_delayed_event_txn + ) + + async def cancel_delayed_state_events( + self, + *, + room_id: str, + event_type: str, + state_key: str, + ) -> Optional[Timestamp]: + """ + Cancels all matching delayed state events, i.e. removes them, as long as they haven't been processed. + + Returns: The send time of the next delayed event to be sent, if any. + """ + + def cancel_delayed_state_events_txn( + txn: LoggingTransaction, + ) -> Optional[Timestamp]: + self.db_pool.simple_delete_txn( + txn, + table="delayed_events", + keyvalues={ + "room_id": room_id, + "event_type": event_type, + "state_key": state_key, + "is_processed": False, + }, + ) + return self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "cancel_delayed_state_events", cancel_delayed_state_events_txn + ) + + async def delete_processed_delayed_event( + self, + delay_id: DelayID, + user_localpart: UserLocalpart, + ) -> None: + """ + Deletes the matching delayed event, as long as it has been marked as processed. + + Raises: + StoreError: if there is no matching delayed event, or if it has not yet been processed. + """ + return await self.db_pool.simple_delete_one( + table="delayed_events", + keyvalues={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "is_processed": True, + }, + desc="delete_processed_delayed_event", + ) + + async def delete_processed_delayed_state_events( + self, + *, + room_id: str, + event_type: str, + state_key: str, + ) -> None: + """ + Deletes the matching delayed state events that have been marked as processed.
+ """ + await self.db_pool.simple_delete( + table="delayed_events", + keyvalues={ + "room_id": room_id, + "event_type": event_type, + "state_key": state_key, + "is_processed": True, + }, + desc="delete_processed_delayed_state_events", + ) + + async def unprocess_delayed_events(self) -> None: + """ + Unmarks all delayed events as processed, so that they will be picked up for processing again. + """ + await self.db_pool.simple_update( + table="delayed_events", + keyvalues={"is_processed": True}, + updatevalues={"is_processed": False}, + desc="unprocess_delayed_events", + ) + + async def get_next_delayed_event_send_ts(self) -> Optional[Timestamp]: + """ + Returns the send time of the next delayed event to be sent, if any. + """ + return await self.db_pool.runInteraction( + "get_next_delayed_event_send_ts", + self._get_next_delayed_event_send_ts_txn, + db_autocommit=True, + ) + + def _get_next_delayed_event_send_ts_txn( + self, txn: LoggingTransaction + ) -> Optional[Timestamp]: + result = self.db_pool.simple_select_one_onecol_txn( + txn, + table="delayed_events", + keyvalues={"is_processed": False}, + retcol="MIN(send_ts)", + allow_none=True, + ) + return Timestamp(result) if result is not None else None + + +def _generate_delay_id() -> DelayID: + """Generates an opaque string, for use as a delay ID""" + + # We use the following format for delay IDs: + # syd_<random string> + # They are scoped to user localparts, so it is possible for + # the same ID to exist for multiple users. + + return DelayID(f"syd_{stringutils.random_string(20)}") diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index d8afa6da02..f171f4568a 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 87 # remember to update the list below when updating +SCHEMA_VERSION = 88 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -149,6 +149,10 @@ Changes in SCHEMA_VERSION = 87 - Add tables for storing the per-connection state for sliding sync requests: sliding_sync_connections, sliding_sync_connection_positions, sliding_sync_connection_required_state, sliding_sync_connection_room_configs, sliding_sync_connection_streams + +Changes in SCHEMA_VERSION = 88 + - MSC4140: Add `delayed_events` table that keeps track of events that are to + be posted in response to a resettable timeout or an on-demand action.
""" diff --git a/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql new file mode 100644 index 0000000000..55bfbc8ae7 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql @@ -0,0 +1,30 @@ +CREATE TABLE delayed_events ( + delay_id TEXT NOT NULL, + user_localpart TEXT NOT NULL, + device_id TEXT, + delay BIGINT NOT NULL, + send_ts BIGINT NOT NULL, + room_id TEXT NOT NULL, + event_type TEXT NOT NULL, + state_key TEXT, + origin_server_ts BIGINT, + content bytea NOT NULL, + is_processed BOOLEAN NOT NULL DEFAULT FALSE, + PRIMARY KEY (user_localpart, delay_id) +); + +CREATE INDEX delayed_events_send_ts ON delayed_events (send_ts); +CREATE INDEX delayed_events_is_processed ON delayed_events (is_processed); +CREATE INDEX delayed_events_room_state_event_idx ON delayed_events (room_id, event_type, state_key) WHERE state_key IS NOT NULL; + +CREATE TABLE delayed_events_stream_pos ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +-- Start processing events from the point this migration was run, rather +-- than the beginning of time. +INSERT INTO delayed_events_stream_pos ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py new file mode 100644 index 0000000000..34d9fe7958 --- /dev/null +++ b/tests/rest/client/test_delayed_events.py @@ -0,0 +1,346 @@ +"""Tests REST events for /delayed_events paths.""" + +from http import HTTPStatus +from typing import List + +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import Codes +from synapse.rest.client import delayed_events, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + +PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events" + +_HS_NAME = "red" +_EVENT_TYPE = "com.example.test" + + +class DelayedEventsTestCase(HomeserverTestCase): + """Tests getting and managing delayed events.""" + + servlets = [delayed_events.register_servlets, room.register_servlets] + user_id = f"@sid1:{_HS_NAME}" + + def default_config(self) -> JsonDict: + config = super().default_config() + config["server_name"] = _HS_NAME + config["max_event_delay_duration"] = "24h" + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.room_id = self.helper.create_room_as( + self.user_id, + extra_content={ + "preset": "trusted_private_chat", + }, + ) + + def test_delayed_events_empty_on_startup(self) -> None: + self.assertListEqual([], self._get_delayed_events()) + + def test_delayed_state_events_are_sent_on_timeout(self) -> None: + state_key = "to_send_on_timeout" + + setter_key = "setter" + setter_expected = "on_timeout" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900), + { + setter_key: setter_expected, + }, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + 
expect_code=HTTPStatus.NOT_FOUND, + ) + + self.reactor.advance(1) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def test_update_delayed_event_without_id(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/", + ) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result) + + def test_update_delayed_event_without_body(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.NOT_JSON, + channel.json_body["errcode"], + ) + + def test_update_delayed_event_without_action(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.MISSING_PARAM, + channel.json_body["errcode"], + ) + + def test_update_delayed_event_with_invalid_action(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {"action": "oops"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.INVALID_PARAM, + channel.json_body["errcode"], + ) + + @parameterized.expand(["cancel", "restart", "send"]) + def test_update_delayed_event_without_match(self, action: str) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {"action": action}, + ) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result) + + def test_cancel_delayed_state_event(self) -> None: + state_key = "to_never_send" + + setter_key = "setter" + setter_expected = "none" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500), + { + setter_key: setter_expected, + }, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "cancel"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertListEqual([], self._get_delayed_events()) + + self.reactor.advance(1) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + def test_send_delayed_state_event(self) -> None: + state_key = "to_send_on_request" + + setter_key = "setter" + setter_expected = "on_send" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 100000), + { + setter_key: setter_expected, + }, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + 
self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "send"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def test_restart_delayed_state_event(self) -> None: + state_key = "to_send_on_restarted_timeout" + + setter_key = "setter" + setter_expected = "on_timeout" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500), + { + setter_key: setter_expected, + }, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "restart"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + self.reactor.advance(1) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def test_delayed_state_events_are_cancelled_by_more_recent_state(self) -> None: + state_key = "to_be_cancelled" + + setter_key = "setter" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900), + { + setter_key: "on_timeout", + }, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + + setter_expected = "manual" + self.helper.send_state( + self.room_id, + _EVENT_TYPE, + { + setter_key: setter_expected, + }, + None, + state_key=state_key, + ) + self.assertListEqual([], self._get_delayed_events()) + + self.reactor.advance(1) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + "", + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def _get_delayed_events(self) -> List[JsonDict]: + channel = self.make_request( + "GET", + PATH_PREFIX, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + key = "delayed_events" + self.assertIn(key, channel.json_body) + + events = channel.json_body[key] + self.assertIsInstance(events, list) + + return events + + def _get_delayed_event_content(self, event: JsonDict) -> JsonDict: + key = "content" + self.assertIn(key, event) + + content = event[key] + self.assertIsInstance(content, dict) + + return content + + +def _get_path_for_delayed_state( + room_id: str, event_type: str, 
state_key: str, delay_ms: int +) -> str: + return f"rooms/{room_id}/state/{event_type}/{state_key}?org.matrix.msc4140.delay={delay_ms}" diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index c559dfda83..00be0051c6 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2291,6 +2291,106 @@ class RoomMessageFilterTestCase(RoomBase): self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) +class RoomDelayedEventTestCase(RoomBase): + """Tests delayed events.""" + + user_id = "@sid1:red" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.room_id = self.helper.create_room_as(self.user_id) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_invalid_event(self) -> None: + """Test sending a delayed event with invalid content.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertNotIn("org.matrix.msc4140.errcode", channel.json_body) + + def test_delayed_event_unsupported_by_default(self) -> None: + """Test that sending a delayed event is unsupported with the default config.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + "M_MAX_DELAY_UNSUPPORTED", + channel.json_body.get("org.matrix.msc4140.errcode"), + channel.json_body, + ) + + @unittest.override_config({"max_event_delay_duration": "1000"}) + def test_delayed_event_exceeds_max_delay(self) -> None: + """Test that sending a delayed event fails if its delay is longer than allowed.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + "M_MAX_DELAY_EXCEEDED", + channel.json_body.get("org.matrix.msc4140.errcode"), + channel.json_body, + ) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_delayed_event_with_negative_delay(self) -> None: + """Test that sending a delayed event fails if its delay is negative.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=-2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.INVALID_PARAM, channel.json_body["errcode"], channel.json_body + ) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_message_event(self) -> None: + """Test sending a valid delayed message event.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_state_event(self) -> None: + """Test sending a valid delayed state event.""" + channel = self.make_request( + "PUT", + ( + 
"rooms/%s/state/m.room.topic/?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"topic": "This is a topic"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + class RoomSearchTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, From aad26cb93f9428558b5c18b6c58897ff82c2ed59 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Sep 2024 11:07:23 +0100 Subject: [PATCH 266/332] Never return negative bump stamp (#17748) Fixes #17737 --- changelog.d/17748.bugfix | 1 + synapse/handlers/sliding_sync/__init__.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 changelog.d/17748.bugfix diff --git a/changelog.d/17748.bugfix b/changelog.d/17748.bugfix new file mode 100644 index 0000000000..dda8331f57 --- /dev/null +++ b/changelog.d/17748.bugfix @@ -0,0 +1 @@ +Fix bug in sliding sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 5206af22ec..9fcc68ff25 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1063,6 +1063,22 @@ class SlidingSyncHandler: if new_bump_stamp is not None: bump_stamp = new_bump_stamp + if bump_stamp < 0: + # We never want to send down negative stream orderings, as you can't + # sensibly compare positive and negative stream orderings (they have + # different meanings). + # + # A negative bump stamp here can only happen if the stream ordering + # of the membership event is negative (and there are no further bump + # stamps), which can happen if the server leaves and deletes a room, + # and then rejoins it. + # + # To deal with this, we just set the bump stamp to zero, which will + # shove this room to the bottom of the list. This is OK as the + # moment a new message happens in the room it will get put into a + # sensible order again. + bump_stamp = 0 + unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) # Record the `room_sync_config` if we're `ignore_timeline_bound` (which means From 443a9eb335d5b3716edbc01681f692ec49b08d4d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 18:33:57 +0200 Subject: [PATCH 267/332] Bump bytes from 1.7.1 to 1.7.2 (#17743) Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.7.1 to 1.7.2.
Release notes

Sourced from bytes's releases.

Bytes 1.7.2

1.7.2 (September 17, 2024)

Fixed

  • Fix default impl of Buf::{get_int, get_int_le} (#732) (see the sketch below)

Documented

  • Fix double spaces in comments and doc comments (#731)

Internal changes

  • Ensure BytesMut::advance reduces capacity (#728)
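For context on the Buf::{get_int, get_int_le} fix above: both methods read a signed integer of `nbytes` bytes from the front of a buffer, big-endian and little-endian respectively. A rough Python analogue of that behaviour (the helper functions here are illustrative; the real API is on the Rust `Buf` trait):

    def get_int(buf: bytes, nbytes: int) -> int:
        # Signed, big-endian read of the first `nbytes` bytes.
        return int.from_bytes(buf[:nbytes], byteorder="big", signed=True)

    def get_int_le(buf: bytes, nbytes: int) -> int:
        # Signed, little-endian read of the first `nbytes` bytes.
        return int.from_bytes(buf[:nbytes], byteorder="little", signed=True)

    assert get_int(b"\xff\xfe", 2) == -2
    assert get_int_le(b"\xfe\xff", 2) == -2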
---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b438b2cac0..a6cab1085d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "cfg-if" From ac8c9ac50d82fc85d11257cf3d5b0b88713ac374 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 18:50:57 +0200 Subject: [PATCH 268/332] Bump python-multipart from 0.0.9 to 0.0.10 (#17745) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [python-multipart](https://github.com/Kludex/python-multipart) from 0.0.9 to 0.0.10.
Release notes

Sourced from python-multipart's releases.

Version 0.0.10

Full Changelog: https://github.com/Kludex/python-multipart/compare/0.0.9...0.0.10

Changelog

Sourced from python-multipart's changelog.

0.0.10 (2024-09-21)

  • Support on_header_begin #103.
  • Improve type hints on FormParser #104.
  • Fix OnFileCallback type #106.
  • Improve type hints #110.
  • Improve type hints on File #111.
  • Add type hint to helper functions #112.
  • Minor fix for Field.repr #114.
  • Fix use of chunk_size parameter #136.
  • Allow digits and valid token chars in headers #134.
  • Fix headers being carried between parts #135.
---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index fad6dfdaa6..3baf61e0b1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1962,18 +1962,15 @@ six = ">=1.5" [[package]] name = "python-multipart" -version = "0.0.9" +version = "0.0.10" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, - {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, + {file = "python_multipart-0.0.10-py3-none-any.whl", hash = "sha256:2b06ad9e8d50c7a8db80e3b56dab590137b323410605af2be20d62a5f1ba1dc8"}, + {file = "python_multipart-0.0.10.tar.gz", hash = "sha256:46eb3c6ce6fdda5fb1a03c7e11d490e407c6930a2703fe7aef4da71c374688fa"}, ] -[package.extras] -dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] - [[package]] name = "pytz" version = "2022.7.1" From af2da0e47aee2970722d1e4ec4ca12924fc42765 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 18:51:07 +0200 Subject: [PATCH 269/332] Bump pyasn1-modules from 0.4.0 to 0.4.1 (#17747) Bumps [pyasn1-modules](https://github.com/pyasn1/pyasn1-modules) from 0.4.0 to 0.4.1.
Release notes

Sourced from pyasn1-modules's releases.

Release 0.4.1

It's a minor release.

  • Added support for Python 3.13.

All changes are noted in the CHANGELOG.

---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3baf61e0b1..0c83761564 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1643,13 +1643,13 @@ files = [ [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] From afc3af7763b2b939c400f7a0dbd4ec2b7732b9d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 18:51:24 +0200 Subject: [PATCH 270/332] Bump prometheus-client from 0.20.0 to 0.21.0 (#17746) Bumps [prometheus-client](https://github.com/prometheus/client_python) from 0.20.0 to 0.21.0.
Release notes

Sourced from prometheus-client's releases.

0.21.0 / 2024-09-20

What's Changed

  • [CHANGE] Reject invalid (not GET or OPTION) HTTP methods. #1019
  • [ENHANCEMENT] Allow writing metrics when holding a lock for the metric in the same thread. #1014
  • [BUGFIX] Check for and error on None label values. #1012 (see the sketch below)
  • [BUGFIX] Fix timestamp comparison. #1038
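For context on the None-label fix above, a minimal sketch of the labelled-metric API it concerns (the metric and label names here are illustrative):

    from prometheus_client import Counter

    REQUESTS = Counter("myapp_requests_total", "Requests served", ["method"])
    REQUESTS.labels(method="GET").inc()  # normal usage
    # Per the 0.21.0 changelog entry above, a None label value is now
    # rejected with an error instead of being accepted silently:
    # REQUESTS.labels(method=None).inc()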

---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0c83761564..3425bf26df 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1569,13 +1569,13 @@ files = [ [[package]] name = "prometheus-client" -version = "0.20.0" +version = "0.21.0" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, - {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, + {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, + {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, ] [package.extras] From 985b3ab58d2ebb4c4a294aa783d5654c4f5fa1fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:21:38 +0000 Subject: [PATCH 271/332] Bump types-pyyaml from 6.0.12.20240808 to 6.0.12.20240917 (#17755) Bumps [types-pyyaml](https://github.com/python/typeshed) from 6.0.12.20240808 to 6.0.12.20240917.
---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3425bf26df..1fad9ee1cd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2795,13 +2795,13 @@ types-cffi = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.20240808" +version = "6.0.12.20240917" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, - {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, + {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, + {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, ] [[package]] From e4b0cd87cca731db894fbb3f169ee460286607ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:28:08 +0000 Subject: [PATCH 272/332] Bump pydantic from 2.8.2 to 2.9.2 (#17756) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.8.2 to 2.9.2.
Release notes

Sourced from pydantic's releases.

v2.9.2 (2024-09-17)

Full Changelog: https://github.com/pydantic/pydantic/compare/v2.9.1...v2.9.2

v2.9.1 (2024-09-09)

Full Changelog: https://github.com/pydantic/pydantic/compare/v2.9.0...v2.9.1

v2.9.0 (2024-09-05)

The code released in v2.9.0 is practically identical to that of v2.9.0b2.

Check out our blog post to learn more about the release highlights!

What's Changed

Packaging

New Features

... (truncated)

Commits
  • 7cedbfb history updates
  • 7eab2b8 v bump
  • c0a288f Fix ZoneInfo with various invalid types (#10408)
  • ea6115d Fix variance issue in _IncEx type alias, only allow True (#10414)
  • fbfe25a Fix serialization schema generation when using PlainValidator (#10427) (see the sketch below)
  • 26cff3c Adding notes on designing callable discriminators (#10400)
  • 8a0e7ad Do not error when trying to evaluate annotations of private attributes (#10358)
  • ecc5275 bump
  • 2c61bfd Fix evaluation of stringified annotations during namespace inspection (#10347)
  • 3d364cb Use correct types namespace when building namedtuple core schemas (#10337)
  • Additional commits viewable in compare view
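For context on the PlainValidator fix above, a minimal pydantic v2 sketch of the construct that commit concerns (the model and field names are illustrative):

    from typing import Annotated

    from pydantic import BaseModel, PlainValidator

    # PlainValidator replaces the default validation for the annotated type.
    UpperStr = Annotated[str, PlainValidator(lambda v: str(v).upper())]

    class Event(BaseModel):
        kind: UpperStr

    event = Event(kind="delayed")
    assert event.kind == "DELAYED"
    # #10427 fixed serialization schema generation for fields validated this way.
    print(event.model_dump_json())  # {"kind":"DELAYED"}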

---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 199 ++++++++++++++++++++++++++-------------------------- 1 file changed, 100 insertions(+), 99 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1fad9ee1cd..055ba364c5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "annotated-types" -version = "0.5.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, - {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] @@ -1668,18 +1668,18 @@ files = [ [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" typing-extensions = [ {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, {version = ">=4.6.1", markers = "python_version < \"3.13\""}, @@ -1687,103 +1687,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = 
"sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] From b066b3aa04248260bd0cfb2ac3569a6e4a3236ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:30:24 +0000 Subject: [PATCH 273/332] Bump types-setuptools from 74.1.0.20240907 to 75.1.0.20240917 (#17757) Bumps [types-setuptools](https://github.com/python/typeshed) from 74.1.0.20240907 to 75.1.0.20240917.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 055ba364c5..9126b3417b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2821,13 +2821,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "74.1.0.20240907" +version = "75.1.0.20240917" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-74.1.0.20240907.tar.gz", hash = "sha256:0abdb082552ca966c1e5fc244e4853adc62971f6cd724fb1d8a3713b580e5a65"}, - {file = "types_setuptools-74.1.0.20240907-py3-none-any.whl", hash = "sha256:15b38c8e63ca34f42f6063ff4b1dd662ea20086166d5ad6a102e670a52574120"}, + {file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"}, + {file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"}, ] [[package]] From b89a66f831097fc75c729a69e575d8755391bb91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 00:20:24 +0200 Subject: [PATCH 274/332] Bump idna from 3.8 to 3.10 (#17758) --- poetry.lock | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9126b3417b..ef146de8cc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -608,15 +608,18 @@ idna = ">=2.5" [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "ijson" version = "3.3.0" From 89e7609f5c368c755bff5c242d7f97dd67ff79ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:34:37 +0000 Subject: [PATCH 275/332] Bump msgpack from 1.0.8 to 1.1.0 (#17759) --- poetry.lock | 122 ++++++++++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index ef146de8cc..65d83deefc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1246,67 +1246,75 @@ files = [ [[package]] name = "msgpack" -version = "1.0.8" +version = "1.1.0" description = "MessagePack serializer" optional = false python-versions = ">=3.8" files = [ - {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"}, - {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"}, - {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"}, - {file = 
"msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"}, - {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"}, - {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"}, - {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"}, - {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"}, - {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"}, - {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"}, - {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"}, - {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"}, - {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"}, - {file = 
"msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"}, - {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"}, - {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"}, - {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"}, - {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"}, - {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"}, - {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"}, - {file = 
"msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"}, - {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"}, - {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"}, - {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = 
"msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file 
= "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, ] [[package]] From f68e8d0021047c93bd8bb13a693903fd63effad5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:48:43 +0000 Subject: [PATCH 276/332] Bump ruff from 0.6.5 to 0.6.7 (#17760) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 65d83deefc..b3985df12a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.5" +version = "0.6.7" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.5-py3-none-linux_armv6l.whl", hash = "sha256:7e4e308f16e07c95fc7753fc1aaac690a323b2bb9f4ec5e844a97bb7fbebd748"}, - {file = "ruff-0.6.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:932cd69eefe4daf8c7d92bd6689f7e8182571cb934ea720af218929da7bd7d69"}, - {file = "ruff-0.6.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3a8d42d11fff8d3143ff4da41742a98f8f233bf8890e9fe23077826818f8d680"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a50af6e828ee692fb10ff2dfe53f05caecf077f4210fae9677e06a808275754f"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:794ada3400a0d0b89e3015f1a7e01f4c97320ac665b7bc3ade24b50b54cb2972"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:381413ec47f71ce1d1c614f7779d88886f406f1fd53d289c77e4e533dc6ea200"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:52e75a82bbc9b42e63c08d22ad0ac525117e72aee9729a069d7c4f235fc4d276"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09c72a833fd3551135ceddcba5ebdb68ff89225d30758027280968c9acdc7810"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:800c50371bdcb99b3c1551d5691e14d16d6f07063a518770254227f7f6e8c178"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e25ddd9cd63ba1f3bd51c1f09903904a6adf8429df34f17d728a8fa11174253"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7291e64d7129f24d1b0c947ec3ec4c0076e958d1475c61202497c6aced35dd19"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9ad7dfbd138d09d9a7e6931e6a7e797651ce29becd688be8a0d4d5f8177b4b0c"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:005256d977021790cc52aa23d78f06bb5090dc0bfbd42de46d49c201533982ae"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:482c1e6bfeb615eafc5899127b805d28e387bd87db38b2c0c41d271f5e58d8cc"}, - {file = "ruff-0.6.5-py3-none-win32.whl", hash = "sha256:cf4d3fa53644137f6a4a27a2b397381d16454a1566ae5335855c187fbf67e4f5"}, - {file = "ruff-0.6.5-py3-none-win_amd64.whl", hash = "sha256:3e42a57b58e3612051a636bc1ac4e6b838679530235520e8f095f7c44f706ff9"}, - {file = "ruff-0.6.5-py3-none-win_arm64.whl", hash = "sha256:51935067740773afdf97493ba9b8231279e9beef0f2a8079188c4776c25688e0"}, - {file = "ruff-0.6.5.tar.gz", hash = 
"sha256:4d32d87fab433c0cf285c3683dd4dae63be05fd7a1d65b3f5bf7cdd05a6b96fb"}, + {file = "ruff-0.6.7-py3-none-linux_armv6l.whl", hash = "sha256:08277b217534bfdcc2e1377f7f933e1c7957453e8a79764d004e44c40db923f2"}, + {file = "ruff-0.6.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c6707a32e03b791f4448dc0dce24b636cbcdee4dd5607adc24e5ee73fd86c00a"}, + {file = "ruff-0.6.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:533d66b7774ef224e7cf91506a7dafcc9e8ec7c059263ec46629e54e7b1f90ab"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a86aac6f915932d259f7bec79173e356165518859f94649d8c50b81ff087e9"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3f8822defd260ae2460ea3832b24d37d203c3577f48b055590a426a722d50ef"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ba4efe5c6dbbb58be58dd83feedb83b5e95c00091bf09987b4baf510fee5c99"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:525201b77f94d2b54868f0cbe5edc018e64c22563da6c5c2e5c107a4e85c1c0d"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8854450839f339e1049fdbe15d875384242b8e85d5c6947bb2faad33c651020b"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f0b62056246234d59cbf2ea66e84812dc9ec4540518e37553513392c171cb18"}, + {file = "ruff-0.6.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b1462fa56c832dc0cea5b4041cfc9c97813505d11cce74ebc6d1aae068de36b"}, + {file = "ruff-0.6.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:02b083770e4cdb1495ed313f5694c62808e71764ec6ee5db84eedd82fd32d8f5"}, + {file = "ruff-0.6.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c05fd37013de36dfa883a3854fae57b3113aaa8abf5dea79202675991d48624"}, + {file = "ruff-0.6.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f49c9caa28d9bbfac4a637ae10327b3db00f47d038f3fbb2195c4d682e925b14"}, + {file = "ruff-0.6.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a0e1655868164e114ba43a908fd2d64a271a23660195017c17691fb6355d59bb"}, + {file = "ruff-0.6.7-py3-none-win32.whl", hash = "sha256:a939ca435b49f6966a7dd64b765c9df16f1faed0ca3b6f16acdf7731969deb35"}, + {file = "ruff-0.6.7-py3-none-win_amd64.whl", hash = "sha256:590445eec5653f36248584579c06252ad2e110a5d1f32db5420de35fb0e1c977"}, + {file = "ruff-0.6.7-py3-none-win_arm64.whl", hash = "sha256:b28f0d5e2f771c1fe3c7a45d3f53916fc74a480698c4b5731f0bea61e52137c8"}, + {file = "ruff-0.6.7.tar.gz", hash = "sha256:44e52129d82266fa59b587e2cd74def5637b730a69c4542525dfdecfaae38bd5"}, ] [[package]] @@ -3113,4 +3113,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "0c833ab57d2082e1ebe2627aef122ce4f93c1abe1f9d8739d5ea3fe52c79fa3f" +content-hash = "93c267fac3428b764f954e6faa17937b9c97b1ed2bdafc41dd8f6cb5d2ce085b" diff --git a/pyproject.toml b/pyproject.toml index 445f4dcad6..480ab7f375 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-ruff = "0.6.5" +ruff = "0.6.7" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From b0d2aca16475c73bfb2e7225ab75e873ffc272f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 06:38:37 +0000 Subject: [PATCH 277/332] Bump phonenumbers from 8.13.44 to 8.13.45 (#17762) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b3985df12a..e1f8ec3275 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1447,13 +1447,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.44" +version = "8.13.45" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.44-py2.py3-none-any.whl", hash = "sha256:52cd02865dab1428ca9e89d442629b61d407c7dc687cfb80a3e8d068a584513c"}, - {file = "phonenumbers-8.13.44.tar.gz", hash = "sha256:2175021e84ee4e41b43c890f2d0af51f18c6ca9ad525886d6d6e4ea882e46fac"}, + {file = "phonenumbers-8.13.45-py2.py3-none-any.whl", hash = "sha256:bf05ec20fcd13f0d53e43a34ed7bd1c8be26a72b88fce4b8c64fca5b4641987a"}, + {file = "phonenumbers-8.13.45.tar.gz", hash = "sha256:53679a95b6060fd5e15467759252c87933d8566d6a5be00995a579eb0e02435b"}, ] [[package]] From 2fc43e4219b3a2dbaf3a8fca86675411daf36612 Mon Sep 17 00:00:00 2001 From: V02460 Date: Wed, 25 Sep 2024 11:15:34 +0200 Subject: [PATCH 278/332] Remove the deprecated cgi module (#17741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removes all uses of the `cgi` module from Synapse. It was deprecated in Python version 3.11 and removed in version 3.13 ([“dead battery”](https://docs.python.org/3.13/whatsnew/3.13.html#pep-594-remove-dead-batteries-from-the-standard-library)). ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Quentin Gliech --- changelog.d/17741.misc | 1 + contrib/graph/graph.py | 4 ++-- synapse/http/matrixfederationclient.py | 6 +++--- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17741.misc diff --git a/changelog.d/17741.misc b/changelog.d/17741.misc new file mode 100644 index 0000000000..119c81edab --- /dev/null +++ b/changelog.d/17741.misc @@ -0,0 +1 @@ +Remove usage of the deprecated cgi module. 
\ No newline at end of file
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index 779590768f..1d74fee822 100644
--- a/contrib/graph/graph.py
+++ b/contrib/graph/graph.py
@@ -20,8 +20,8 @@
 #
 
 import argparse
-import cgi
 import datetime
+import html
 import json
 import urllib.request
 from typing import List
@@ -85,7 +85,7 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
             "name": name,
             "type": pdu.get("pdu_type"),
             "state_key": pdu.get("state_key"),
-            "content": cgi.escape(json.dumps(pdu.get("content")), quote=True),
+            "content": html.escape(json.dumps(pdu.get("content")), quote=True),
             "time": t,
             "depth": pdu.get("depth"),
         }
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index ecbbb6cfc4..b9ecdc2733 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -19,7 +19,6 @@
 #
 #
 import abc
-import cgi
 import codecs
 import logging
 import random
@@ -1813,8 +1812,9 @@ def check_content_type_is(headers: Headers, expected_content_type: str) -> None:
         )
 
     c_type = content_type_headers[0].decode("ascii")  # only the first header
-    val, options = cgi.parse_header(c_type)
-    if val != expected_content_type:
+    # Extract the 'essence' of the mimetype, removing any parameter
+    c_type_parsed = c_type.split(";", 1)[0].strip()
+    if c_type_parsed != expected_content_type:
         raise RequestSendFailed(
             RuntimeError(
                 f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'",
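Both replacements above are behaviour-preserving for Synapse's purposes. A standalone sketch (an illustration, not part of the patch) that can be run to check the two equivalences:

```python
import html

# check_content_type_is() only ever used the first element of the
# (value, params) tuple that cgi.parse_header() returned, so splitting
# off the parameters by hand yields the same MIME "essence":
c_type = "application/json; charset=utf-8"
assert c_type.split(";", 1)[0].strip() == "application/json"

# html.escape(s, quote=True) escapes everything cgi.escape(s, quote=True)
# escaped (&, <, > and ") and additionally the single quote, which is
# harmless for the graphviz labels built in contrib/graph/graph.py:
assert html.escape('{"body": "<b>hi</b>"}', quote=True) == (
    "{&quot;body&quot;: &quot;&lt;b&gt;hi&lt;/b&gt;&quot;}"
)
```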
From f53a3a56e2d128dc665a20d857b85de0497a1869 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 25 Sep 2024 11:19:03 +0200
Subject: [PATCH 279/332] Bump treq from 23.11.0 to 24.9.1 (#17744)

Bumps [treq](https://github.com/twisted/treq) from 23.11.0 to 24.9.1.
Changelog

Sourced from treq's changelog.

24.9.1 (2024-09-19)

Bugfixes

      • treq has vendored its dependency on the multipart library to avoid import conflicts with python-multipart; it should now be installable alongside that library. ([#399](https://github.com/twisted/treq/issues/399))
    

24.9.0 (2024-09-17)

Features

      • treq now ships type annotations. ([#366](https://github.com/twisted/treq/issues/366))
      • The new `treq.cookies` module provides helper functions for working with `http.cookiejar.Cookie` and `CookieJar` objects. ([#384](https://github.com/twisted/treq/issues/384))
      • Python 3.13 is now supported. ([#391](https://github.com/twisted/treq/issues/391))
    

Bugfixes

      • `treq.content.text_content()` no longer generates deprecation warnings due to use of the cgi module. ([#355](https://github.com/twisted/treq/issues/355))
    

Deprecations and Removals

      • Mixing the `json` argument with `files` or `data` now raises `TypeError`. ([#297](https://github.com/twisted/treq/issues/297))
      • Passing non-string (`str` or `bytes`) values as part of a dict to the `headers` argument now results in a `TypeError`, as does passing any collection other than a dict or `Headers` instance. ([#302](https://github.com/twisted/treq/issues/302))
      • Support for Python 3.7 and PyPy 3.8, which have reached end of support, has been dropped. ([#378](https://github.com/twisted/treq/issues/378))
    

Misc

      • [#336](https://github.com/twisted/treq/issues/336), [#382](https://github.com/twisted/treq/issues/382), [#395](https://github.com/twisted/treq/issues/395)
    
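    A knock-on effect of treq's new annotations shows up in the Synapse hunk below, where an error-path fallback changes from `body = None` to `body = b""`: the variable is no longer `Unknown` to the type checker, so a `None` fallback no longer fits its `bytes` type. A minimal sketch of the pattern, with illustrative names rather than the real Synapse signatures:

    ```python
    from typing import Optional

    def format_error(code: int, phrase: str, body: bytes) -> str:
        # Stand-in for error reporting that is annotated as taking bytes.
        return f"{code} {phrase}: {body.decode('utf-8', 'replace')}"

    maybe_body: Optional[bytes] = None  # e.g. reading the response body failed

    # With precise annotations, passing None would now be a type error, so the
    # caller falls back to empty bytes instead:
    print(format_error(502, "Bad Gateway", maybe_body if maybe_body is not None else b""))
    ```
    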
    
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Quentin Gliech --- changelog.d/17744.misc | 1 + poetry.lock | 9 +++++---- synapse/http/matrixfederationclient.py | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17744.misc diff --git a/changelog.d/17744.misc b/changelog.d/17744.misc new file mode 100644 index 0000000000..e4f7cc8efa --- /dev/null +++ b/changelog.d/17744.misc @@ -0,0 +1 @@ +Fix typing of a variable that is not `Unknown` anymore after updating `treq`. diff --git a/poetry.lock b/poetry.lock index e1f8ec3275..0f2ee8cac0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2587,13 +2587,13 @@ dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"] [[package]] name = "treq" -version = "23.11.0" +version = "24.9.1" description = "High-level Twisted HTTP Client API" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "treq-23.11.0-py3-none-any.whl", hash = "sha256:f494c2218d61cab2cabbee37cd6606d3eea9d16cf14190323095c95d22c467e9"}, - {file = "treq-23.11.0.tar.gz", hash = "sha256:0914ff929fd1632ce16797235260f8bc19d20ff7c459c1deabd65b8c68cbeac5"}, + {file = "treq-24.9.1-py3-none-any.whl", hash = "sha256:eee4756fd9a857c77f180fd5202b52c518f2d3e2826dce28b89066c03bfc45d0"}, + {file = "treq-24.9.1.tar.gz", hash = "sha256:15da7fc404f3e4ed59d0abe5f8eef4966fabbe618039a2a23bc7c15305cefea8"}, ] [package.dependencies] @@ -2602,6 +2602,7 @@ hyperlink = ">=21.0.0" incremental = "*" requests = ">=2.1.0" Twisted = {version = ">=22.10.0", extras = ["tls"]} +typing-extensions = ">=3.10.0" [package.extras] dev = ["httpbin (==0.7.0)", "pep8", "pyflakes", "werkzeug (==2.0.3)"] diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b9ecdc2733..e658c68e23 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -791,7 +791,7 @@ class MatrixFederationHttpClient: url_str, _flatten_response_never_received(e), ) - body = None + body = b"" exc = HttpResponseException( response.code, response_phrase, body From 386cabda83b3b63156bb2f77fe0e93339c0258cf Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 25 Sep 2024 11:34:36 +0200 Subject: [PATCH 280/332] 1.116.0rc1 --- CHANGES.md | 66 +++++++++++++++++++++++++++++++++++++++ changelog.d/17326.feature | 2 -- changelog.d/17506.feature | 2 -- changelog.d/17662.feature | 1 - changelog.d/17667.misc | 5 --- changelog.d/17675.feature | 1 - changelog.d/17690.feature | 1 - changelog.d/17692.bugfix | 1 - changelog.d/17693.misc | 1 - changelog.d/17695.bugfix | 1 - changelog.d/17696.misc | 1 - changelog.d/17703.misc | 1 - changelog.d/17707.feature | 1 - changelog.d/17723.misc | 1 - changelog.d/17724.misc | 1 - changelog.d/17725.misc | 1 - changelog.d/17727.bugfix | 1 - changelog.d/17728.misc | 1 - changelog.d/17729.bugfix | 1 - changelog.d/17730.misc | 1 - changelog.d/17731.misc | 1 - changelog.d/17733.bugfix | 1 - changelog.d/17734.misc | 1 - changelog.d/17741.misc | 1 - changelog.d/17744.misc | 1 - changelog.d/17748.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 28 files changed, 73 insertions(+), 32 deletions(-) delete mode 100644 changelog.d/17326.feature delete mode 100644 changelog.d/17506.feature delete mode 100644 changelog.d/17662.feature delete mode 100644 changelog.d/17667.misc delete mode 100644 changelog.d/17675.feature delete mode 100644 changelog.d/17690.feature delete mode 
100644 changelog.d/17692.bugfix delete mode 100644 changelog.d/17693.misc delete mode 100644 changelog.d/17695.bugfix delete mode 100644 changelog.d/17696.misc delete mode 100644 changelog.d/17703.misc delete mode 100644 changelog.d/17707.feature delete mode 100644 changelog.d/17723.misc delete mode 100644 changelog.d/17724.misc delete mode 100644 changelog.d/17725.misc delete mode 100644 changelog.d/17727.bugfix delete mode 100644 changelog.d/17728.misc delete mode 100644 changelog.d/17729.bugfix delete mode 100644 changelog.d/17730.misc delete mode 100644 changelog.d/17731.misc delete mode 100644 changelog.d/17733.bugfix delete mode 100644 changelog.d/17734.misc delete mode 100644 changelog.d/17741.misc delete mode 100644 changelog.d/17744.misc delete mode 100644 changelog.d/17748.bugfix diff --git a/CHANGES.md b/CHANGES.md index 26f9326e4c..2ab86b428f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,69 @@ +# Synapse 1.116.0rc1 (2024-09-25) + +### Features + +- Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). ([\#17326](https://github.com/element-hq/synapse/issues/17326)) +- Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user), + and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process). ([\#17506](https://github.com/element-hq/synapse/issues/17506)) +- Add support for the `tags` and `not_tags` filters for simplified sliding sync. ([\#17662](https://github.com/element-hq/synapse/issues/17662)) +- Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). ([\#17675](https://github.com/element-hq/synapse/issues/17675)) +- Add config option `turn_shared_secret_path`. ([\#17690](https://github.com/element-hq/synapse/issues/17690)) +- Return room tags in Sliding Sync account data extension. ([\#17707](https://github.com/element-hq/synapse/issues/17707)) + +### Bugfixes + +- Make sure we get up-to-date state information when using the new Sliding Sync tables to derive room membership. ([\#17692](https://github.com/element-hq/synapse/issues/17692)) +- Fix bug where room account data would not correctly be sent down sliding sync for old rooms. ([\#17695](https://github.com/element-hq/synapse/issues/17695)) +- Fix a bug in SSS which could prevent /sync from working for certain user accounts. ([\#17727](https://github.com/element-hq/synapse/issues/17727), [\#17733](https://github.com/element-hq/synapse/issues/17733)) +- Ignore invites from ignored users in Sliding Sync. ([\#17729](https://github.com/element-hq/synapse/issues/17729)) +- Fix bug in sliding sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. ([\#17748](https://github.com/element-hq/synapse/issues/17748)) + +### Internal Changes + +- Import pydantic objects from the `_pydantic_compat` module. + + This allows `check_pydantic_models.py` to mock those pydantic objects + only in the synapse module, and not interfere with pydantic objects in + external dependencies. ([\#17667](https://github.com/element-hq/synapse/issues/17667)) +- Use Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. 
([\#17693](https://github.com/element-hq/synapse/issues/17693)) +- Speed up sliding sync requests a bit where there are many room changes. ([\#17696](https://github.com/element-hq/synapse/issues/17696)) +- Refactor sliding sync filter unit tests so the sliding sync API has better test coverage. ([\#17703](https://github.com/element-hq/synapse/issues/17703)) +- Fetch `bump_stamp`'s more efficiently in Sliding Sync. ([\#17723](https://github.com/element-hq/synapse/issues/17723)) +- Shortcut for checking if certain background updates have completed (utilized in Sliding Sync). ([\#17724](https://github.com/element-hq/synapse/issues/17724)) +- More efficiently fetch rooms for Sliding Sync. ([\#17725](https://github.com/element-hq/synapse/issues/17725)) +- Fix `_bulk_get_max_event_pos` being inefficient. ([\#17728](https://github.com/element-hq/synapse/issues/17728)) +- Add cache to `get_tags_for_room(...)`. ([\#17730](https://github.com/element-hq/synapse/issues/17730)) +- Small performance improvement in speeding up Sliding Sync. ([\#17731](https://github.com/element-hq/synapse/issues/17731)) +- Minor speed up of initial sliding sync requests. ([\#17734](https://github.com/element-hq/synapse/issues/17734)) +- Remove usage of the deprecated cgi module. ([\#17741](https://github.com/element-hq/synapse/issues/17741)) +- Fix typing of a variable that is not `Unknown` anymore after updating `treq`. ([\#17744](https://github.com/element-hq/synapse/issues/17744)) + + + +### Updates to locked dependencies + +* Bump anyhow from 1.0.86 to 1.0.87. ([\#17685](https://github.com/element-hq/synapse/issues/17685)) +* Bump anyhow from 1.0.87 to 1.0.89. ([\#17716](https://github.com/element-hq/synapse/issues/17716)) +* Bump bytes from 1.7.1 to 1.7.2. ([\#17743](https://github.com/element-hq/synapse/issues/17743)) +* Bump cryptography from 43.0.0 to 43.0.1. ([\#17689](https://github.com/element-hq/synapse/issues/17689)) +* Bump idna from 3.8 to 3.10. ([\#17758](https://github.com/element-hq/synapse/issues/17758)) +* Bump msgpack from 1.0.8 to 1.1.0. ([\#17759](https://github.com/element-hq/synapse/issues/17759)) +* Bump phonenumbers from 8.13.44 to 8.13.45. ([\#17762](https://github.com/element-hq/synapse/issues/17762)) +* Bump prometheus-client from 0.20.0 to 0.21.0. ([\#17746](https://github.com/element-hq/synapse/issues/17746)) +* Bump pyasn1 from 0.6.0 to 0.6.1. ([\#17714](https://github.com/element-hq/synapse/issues/17714)) +* Bump pyasn1-modules from 0.4.0 to 0.4.1. ([\#17747](https://github.com/element-hq/synapse/issues/17747)) +* Bump pydantic from 2.8.2 to 2.9.2. ([\#17756](https://github.com/element-hq/synapse/issues/17756)) +* Bump python-multipart from 0.0.9 to 0.0.10. ([\#17745](https://github.com/element-hq/synapse/issues/17745)) +* Bump ruff from 0.6.4 to 0.6.5. ([\#17715](https://github.com/element-hq/synapse/issues/17715)) +* Bump ruff from 0.6.5 to 0.6.7. ([\#17760](https://github.com/element-hq/synapse/issues/17760)) +* Bump sentry-sdk from 2.13.0 to 2.14.0. ([\#17712](https://github.com/element-hq/synapse/issues/17712)) +* Bump serde from 1.0.209 to 1.0.210. ([\#17686](https://github.com/element-hq/synapse/issues/17686)) +* Bump serde_json from 1.0.127 to 1.0.128. ([\#17687](https://github.com/element-hq/synapse/issues/17687)) +* Bump treq from 23.11.0 to 24.9.1. ([\#17744](https://github.com/element-hq/synapse/issues/17744)) +* Bump types-pyyaml from 6.0.12.20240808 to 6.0.12.20240917. 
([\#17755](https://github.com/element-hq/synapse/issues/17755)) +* Bump types-requests from 2.32.0.20240712 to 2.32.0.20240914. ([\#17713](https://github.com/element-hq/synapse/issues/17713)) +* Bump types-setuptools from 74.1.0.20240907 to 75.1.0.20240917. ([\#17757](https://github.com/element-hq/synapse/issues/17757)) + # Synapse 1.115.0 (2024-09-17) No significant changes since 1.115.0rc2. diff --git a/changelog.d/17326.feature b/changelog.d/17326.feature deleted file mode 100644 index 348c54b040..0000000000 --- a/changelog.d/17326.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). - diff --git a/changelog.d/17506.feature b/changelog.d/17506.feature deleted file mode 100644 index dc71e43fe3..0000000000 --- a/changelog.d/17506.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user), -and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process). \ No newline at end of file diff --git a/changelog.d/17662.feature b/changelog.d/17662.feature deleted file mode 100644 index 46d6037a18..0000000000 --- a/changelog.d/17662.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for the `tags` and `not_tags` filters for simplified sliding sync. diff --git a/changelog.d/17667.misc b/changelog.d/17667.misc deleted file mode 100644 index 6526f283bc..0000000000 --- a/changelog.d/17667.misc +++ /dev/null @@ -1,5 +0,0 @@ -Import pydantic objects from the `_pydantic_compat` module. - -This allows `check_pydantic_models.py` to mock those pydantic objects -only in the synapse module, and not interfere with pydantic objects in -external dependencies. diff --git a/changelog.d/17675.feature b/changelog.d/17675.feature deleted file mode 100644 index 20db149ca8..0000000000 --- a/changelog.d/17675.feature +++ /dev/null @@ -1 +0,0 @@ -Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). \ No newline at end of file diff --git a/changelog.d/17690.feature b/changelog.d/17690.feature deleted file mode 100644 index 36c72f89f8..0000000000 --- a/changelog.d/17690.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option `turn_shared_secret_path`. \ No newline at end of file diff --git a/changelog.d/17692.bugfix b/changelog.d/17692.bugfix deleted file mode 100644 index 84e0754a99..0000000000 --- a/changelog.d/17692.bugfix +++ /dev/null @@ -1 +0,0 @@ -Make sure we get up-to-date state information when using the new Sliding Sync tables to derive room membership. diff --git a/changelog.d/17693.misc b/changelog.d/17693.misc deleted file mode 100644 index 0d20c80916..0000000000 --- a/changelog.d/17693.misc +++ /dev/null @@ -1 +0,0 @@ -Use Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. diff --git a/changelog.d/17695.bugfix b/changelog.d/17695.bugfix deleted file mode 100644 index c63132704f..0000000000 --- a/changelog.d/17695.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where room account data would not correctly be sent down sliding sync for old rooms. 
diff --git a/changelog.d/17696.misc b/changelog.d/17696.misc deleted file mode 100644 index a2f1b1f399..0000000000 --- a/changelog.d/17696.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sliding sync requests a bit where there are many room changes. diff --git a/changelog.d/17703.misc b/changelog.d/17703.misc deleted file mode 100644 index c5b0ea438a..0000000000 --- a/changelog.d/17703.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor sliding sync filter unit tests so the sliding sync API has better test coverage. diff --git a/changelog.d/17707.feature b/changelog.d/17707.feature deleted file mode 100644 index 98a00ca34b..0000000000 --- a/changelog.d/17707.feature +++ /dev/null @@ -1 +0,0 @@ -Return room tags in Sliding Sync account data extension. diff --git a/changelog.d/17723.misc b/changelog.d/17723.misc deleted file mode 100644 index 1f798b4ccc..0000000000 --- a/changelog.d/17723.misc +++ /dev/null @@ -1 +0,0 @@ -Fetch `bump_stamp`'s more efficiently in Sliding Sync. diff --git a/changelog.d/17724.misc b/changelog.d/17724.misc deleted file mode 100644 index 630443f179..0000000000 --- a/changelog.d/17724.misc +++ /dev/null @@ -1 +0,0 @@ -Shortcut for checking if certain background updates have completed (utilized in Sliding Sync). diff --git a/changelog.d/17725.misc b/changelog.d/17725.misc deleted file mode 100644 index 2a53bb1491..0000000000 --- a/changelog.d/17725.misc +++ /dev/null @@ -1 +0,0 @@ -More efficiently fetch rooms for Sliding Sync. diff --git a/changelog.d/17727.bugfix b/changelog.d/17727.bugfix deleted file mode 100644 index 64c6e90d87..0000000000 --- a/changelog.d/17727.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in SSS which could prevent /sync from working for certain user accounts. diff --git a/changelog.d/17728.misc b/changelog.d/17728.misc deleted file mode 100644 index 5ab241e9df..0000000000 --- a/changelog.d/17728.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `_bulk_get_max_event_pos` being inefficient. diff --git a/changelog.d/17729.bugfix b/changelog.d/17729.bugfix deleted file mode 100644 index 4ba4e551c6..0000000000 --- a/changelog.d/17729.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ignore invites from ignored users in Sliding Sync. diff --git a/changelog.d/17730.misc b/changelog.d/17730.misc deleted file mode 100644 index 56da7bfd1a..0000000000 --- a/changelog.d/17730.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `get_tags_for_room(...)`. diff --git a/changelog.d/17731.misc b/changelog.d/17731.misc deleted file mode 100644 index d5df74b4c9..0000000000 --- a/changelog.d/17731.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up Sliding Sync. diff --git a/changelog.d/17733.bugfix b/changelog.d/17733.bugfix deleted file mode 100644 index 64c6e90d87..0000000000 --- a/changelog.d/17733.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in SSS which could prevent /sync from working for certain user accounts. diff --git a/changelog.d/17734.misc b/changelog.d/17734.misc deleted file mode 100644 index 0bb5533ab6..0000000000 --- a/changelog.d/17734.misc +++ /dev/null @@ -1 +0,0 @@ -Minor speed up of initial sliding sync requests. diff --git a/changelog.d/17741.misc b/changelog.d/17741.misc deleted file mode 100644 index 119c81edab..0000000000 --- a/changelog.d/17741.misc +++ /dev/null @@ -1 +0,0 @@ -Remove usage of the deprecated cgi module. 
\ No newline at end of file diff --git a/changelog.d/17744.misc b/changelog.d/17744.misc deleted file mode 100644 index e4f7cc8efa..0000000000 --- a/changelog.d/17744.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typing of a variable that is not `Unknown` anymore after updating `treq`. diff --git a/changelog.d/17748.bugfix b/changelog.d/17748.bugfix deleted file mode 100644 index dda8331f57..0000000000 --- a/changelog.d/17748.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in sliding sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. diff --git a/debian/changelog b/debian/changelog index 5628eec8a5..1fb59ada66 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.116.0~rc1) stable; urgency=medium + + * New synapse release 1.116.0rc1. + + -- Synapse Packaging team Wed, 25 Sep 2024 09:34:07 +0000 + matrix-synapse-py3 (1.115.0) stable; urgency=medium * New Synapse release 1.115.0. diff --git a/pyproject.toml b/pyproject.toml index 480ab7f375..405fd1e1dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.115.0" +version = "1.116.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 13dea6949bee2820632d4937beee98434bac9a8c Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 25 Sep 2024 12:07:51 +0200 Subject: [PATCH 281/332] Changelog fixes --- CHANGES.md | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2ab86b428f..5993971791 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,45 +5,43 @@ - Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). ([\#17326](https://github.com/element-hq/synapse/issues/17326)) - Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user), and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process). ([\#17506](https://github.com/element-hq/synapse/issues/17506)) -- Add support for the `tags` and `not_tags` filters for simplified sliding sync. ([\#17662](https://github.com/element-hq/synapse/issues/17662)) +- Add support for the `tags` and `not_tags` filters for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17662](https://github.com/element-hq/synapse/issues/17662)) - Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). ([\#17675](https://github.com/element-hq/synapse/issues/17675)) - Add config option `turn_shared_secret_path`. ([\#17690](https://github.com/element-hq/synapse/issues/17690)) -- Return room tags in Sliding Sync account data extension. ([\#17707](https://github.com/element-hq/synapse/issues/17707)) +- Return room tags in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync account data extension. ([\#17707](https://github.com/element-hq/synapse/issues/17707)) ### Bugfixes -- Make sure we get up-to-date state information when using the new Sliding Sync tables to derive room membership. 
([\#17692](https://github.com/element-hq/synapse/issues/17692)) -- Fix bug where room account data would not correctly be sent down sliding sync for old rooms. ([\#17695](https://github.com/element-hq/synapse/issues/17695)) -- Fix a bug in SSS which could prevent /sync from working for certain user accounts. ([\#17727](https://github.com/element-hq/synapse/issues/17727), [\#17733](https://github.com/element-hq/synapse/issues/17733)) +- Make sure we get up-to-date state information when using the new [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables to derive room membership. ([\#17692](https://github.com/element-hq/synapse/issues/17692)) +- Fix bug where room account data would not correctly be sent down [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync for old rooms. ([\#17695](https://github.com/element-hq/synapse/issues/17695)) +- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync which could prevent /sync from working for certain user accounts. ([\#17727](https://github.com/element-hq/synapse/issues/17727), [\#17733](https://github.com/element-hq/synapse/issues/17733)) - Ignore invites from ignored users in Sliding Sync. ([\#17729](https://github.com/element-hq/synapse/issues/17729)) -- Fix bug in sliding sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. ([\#17748](https://github.com/element-hq/synapse/issues/17748)) +- Fix bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. ([\#17748](https://github.com/element-hq/synapse/issues/17748)) ### Internal Changes - Import pydantic objects from the `_pydantic_compat` module. - This allows `check_pydantic_models.py` to mock those pydantic objects only in the synapse module, and not interfere with pydantic objects in external dependencies. ([\#17667](https://github.com/element-hq/synapse/issues/17667)) -- Use Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. ([\#17693](https://github.com/element-hq/synapse/issues/17693)) -- Speed up sliding sync requests a bit where there are many room changes. ([\#17696](https://github.com/element-hq/synapse/issues/17696)) -- Refactor sliding sync filter unit tests so the sliding sync API has better test coverage. ([\#17703](https://github.com/element-hq/synapse/issues/17703)) -- Fetch `bump_stamp`'s more efficiently in Sliding Sync. ([\#17723](https://github.com/element-hq/synapse/issues/17723)) -- Shortcut for checking if certain background updates have completed (utilized in Sliding Sync). ([\#17724](https://github.com/element-hq/synapse/issues/17724)) -- More efficiently fetch rooms for Sliding Sync. ([\#17725](https://github.com/element-hq/synapse/issues/17725)) +- Use [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. ([\#17693](https://github.com/element-hq/synapse/issues/17693)) +- Speed up [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests a bit where there are many room changes. 
([\#17696](https://github.com/element-hq/synapse/issues/17696)) +- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync filter unit tests so the sliding sync API has better test coverage. ([\#17703](https://github.com/element-hq/synapse/issues/17703)) +- Fetch `bump_stamp`s more efficiently in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17723](https://github.com/element-hq/synapse/issues/17723)) +- Shortcut for checking if certain background updates have completed (utilized in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync). ([\#17724](https://github.com/element-hq/synapse/issues/17724)) +- More efficiently fetch rooms for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17725](https://github.com/element-hq/synapse/issues/17725)) - Fix `_bulk_get_max_event_pos` being inefficient. ([\#17728](https://github.com/element-hq/synapse/issues/17728)) - Add cache to `get_tags_for_room(...)`. ([\#17730](https://github.com/element-hq/synapse/issues/17730)) -- Small performance improvement in speeding up Sliding Sync. ([\#17731](https://github.com/element-hq/synapse/issues/17731)) -- Minor speed up of initial sliding sync requests. ([\#17734](https://github.com/element-hq/synapse/issues/17734)) -- Remove usage of the deprecated cgi module. ([\#17741](https://github.com/element-hq/synapse/issues/17741)) +- Small performance improvement in speeding up [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17731](https://github.com/element-hq/synapse/issues/17731)) +- Minor speed up of initial [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests. ([\#17734](https://github.com/element-hq/synapse/issues/17734)) +- Remove usage of the deprecated `cgi` module, deprecated in Python 3.11 and removed in Python 3.13. ([\#17741](https://github.com/element-hq/synapse/issues/17741)) - Fix typing of a variable that is not `Unknown` anymore after updating `treq`. ([\#17744](https://github.com/element-hq/synapse/issues/17744)) ### Updates to locked dependencies -* Bump anyhow from 1.0.86 to 1.0.87. ([\#17685](https://github.com/element-hq/synapse/issues/17685)) -* Bump anyhow from 1.0.87 to 1.0.89. ([\#17716](https://github.com/element-hq/synapse/issues/17716)) +* Bump anyhow from 1.0.86 to 1.0.89. ([\#17685](https://github.com/element-hq/synapse/issues/17685), [\#17716](https://github.com/element-hq/synapse/issues/17716)) * Bump bytes from 1.7.1 to 1.7.2. ([\#17743](https://github.com/element-hq/synapse/issues/17743)) * Bump cryptography from 43.0.0 to 43.0.1. ([\#17689](https://github.com/element-hq/synapse/issues/17689)) * Bump idna from 3.8 to 3.10. ([\#17758](https://github.com/element-hq/synapse/issues/17758)) @@ -54,8 +52,7 @@ * Bump pyasn1-modules from 0.4.0 to 0.4.1. ([\#17747](https://github.com/element-hq/synapse/issues/17747)) * Bump pydantic from 2.8.2 to 2.9.2. ([\#17756](https://github.com/element-hq/synapse/issues/17756)) * Bump python-multipart from 0.0.9 to 0.0.10. ([\#17745](https://github.com/element-hq/synapse/issues/17745)) -* Bump ruff from 0.6.4 to 0.6.5. ([\#17715](https://github.com/element-hq/synapse/issues/17715)) -* Bump ruff from 0.6.5 to 0.6.7. ([\#17760](https://github.com/element-hq/synapse/issues/17760)) +* Bump ruff from 0.6.4 to 0.6.7. 
([\#17715](https://github.com/element-hq/synapse/issues/17715), [\#17760](https://github.com/element-hq/synapse/issues/17760)) * Bump sentry-sdk from 2.13.0 to 2.14.0. ([\#17712](https://github.com/element-hq/synapse/issues/17712)) * Bump serde from 1.0.209 to 1.0.210. ([\#17686](https://github.com/element-hq/synapse/issues/17686)) * Bump serde_json from 1.0.127 to 1.0.128. ([\#17687](https://github.com/element-hq/synapse/issues/17687)) From f144b4c7e9b57eeef1203bfe80c500799271c911 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 26 Sep 2024 13:18:28 +0100 Subject: [PATCH 282/332] Remove spurious TODO in debian install step (#17749) This was a note added in the PR to move to AGPL, which we failed to remove before landing. (The context for this was that we needed to decide if we were going to change which debian repository we published too, but decided not to in the end) --- changelog.d/17749.doc | 1 + docs/setup/installation.md | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17749.doc diff --git a/changelog.d/17749.doc b/changelog.d/17749.doc new file mode 100644 index 0000000000..f00c0be3b7 --- /dev/null +++ b/changelog.d/17749.doc @@ -0,0 +1 @@ +Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index f538e1498a..9cebb89b4d 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -52,8 +52,6 @@ architecture via . To install the latest release: -TODO UPDATE ALL THIS - ```sh sudo apt install -y lsb-release wget apt-transport-https sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg @@ -316,7 +314,7 @@ sudo dnf group install "Development Tools" *Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.* -It's recommended to use the latest Python versions. +It's recommended to use the latest Python versions. RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ship with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them. @@ -346,7 +344,7 @@ dnf install python3.12 python3.12-devel ``` Finally, install common prerequisites ```bash -dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf +dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf dnf group install "Development Tools" ``` ###### Using venv module instead of virtualenv command @@ -355,7 +353,7 @@ It's recommended to use Python venv module directly rather than the virtualenv c * On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/). * On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments. -Here's an example of creating Python 3.12 virtual environment and installing Synapse from PyPI. +Here's an example of creating Python 3.12 virtual environment and installing Synapse from PyPI. 
```bash mkdir -p ~/synapse From 302534c3487b7f3a4be6963d628f919d61bc22a6 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Thu, 26 Sep 2024 09:25:05 -0400 Subject: [PATCH 283/332] Support MSC3757: Restricting who can overwrite a state event (#17513) Link to the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/3757 --------- Co-authored-by: Quentin Gliech --- changelog.d/17513.feature | 1 + scripts-dev/complement.sh | 1 + synapse/api/room_versions.py | 58 +++++ synapse/event_auth.py | 37 +++- tests/rest/client/test_owned_state.py | 308 ++++++++++++++++++++++++++ 5 files changed, 399 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17513.feature create mode 100644 tests/rest/client/test_owned_state.py diff --git a/changelog.d/17513.feature b/changelog.d/17513.feature new file mode 100644 index 0000000000..21441c7211 --- /dev/null +++ b/changelog.d/17513.feature @@ -0,0 +1 @@ +Add implementation of restricting who can overwrite a state event as proposed by [MSC3757](https://github.com/matrix-org/matrix-spec-proposals/pull/3757). diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 8fef1ae022..b6dcb96e2c 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -220,6 +220,7 @@ test_packages=( ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 + ./tests/msc3757 ./tests/msc3930 ./tests/msc3902 ./tests/msc3967 diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index fbc1d58ecb..4bde385f78 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -107,6 +107,8 @@ class RoomVersion: # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag + # MSC3757: Restricting who can overwrite a state event + msc3757_enabled: bool class RoomVersions: @@ -128,6 +130,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V2 = RoomVersion( "2", @@ -147,6 +150,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V3 = RoomVersion( "3", @@ -166,6 +170,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V4 = RoomVersion( "4", @@ -185,6 +190,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V5 = RoomVersion( "5", @@ -204,6 +210,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V6 = RoomVersion( "6", @@ -223,6 +230,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V7 = RoomVersion( "7", @@ -242,6 +250,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V8 = RoomVersion( "8", @@ -261,6 +270,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V9 = RoomVersion( "9", @@ -280,6 +290,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V10 = RoomVersion( "10", @@ -299,6 +310,7 @@ class 
RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(), + msc3757_enabled=False, ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" @@ -319,6 +331,28 @@ class RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), + msc3757_enabled=False, + ) + MSC3757v10 = RoomVersion( + # MSC3757 (Restricting who can overwrite a state event) based on room version "10" + "org.matrix.msc3757.10", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, + msc3389_relation_redactions=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, + msc3931_push_features=(), + msc3757_enabled=True, ) V11 = RoomVersion( "11", @@ -338,6 +372,28 @@ class RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(), + msc3757_enabled=False, + ) + MSC3757v11 = RoomVersion( + # MSC3757 (Restricting who can overwrite a state event) based on room version "11" + "org.matrix.msc3757.11", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + implicit_room_creator=True, # Used by MSC3820 + updated_redaction_rules=True, # Used by MSC3820 + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, + msc3389_relation_redactions=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, + msc3931_push_features=(), + msc3757_enabled=True, ) @@ -355,6 +411,8 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V9, RoomVersions.V10, RoomVersions.V11, + RoomVersions.MSC3757v10, + RoomVersions.MSC3757v11, ) } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index b834547d11..c208b900c5 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -388,6 +388,7 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = { RoomVersions.V9, RoomVersions.V10, RoomVersions.MSC1767v10, + RoomVersions.MSC3757v10, } @@ -790,9 +791,10 @@ def get_send_level( def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool: + state_key = event.get_state_key() power_levels_event = get_power_level_event(auth_events) - send_level = get_send_level(event.type, event.get("state_key"), power_levels_event) + send_level = get_send_level(event.type, state_key, power_levels_event) user_level = get_user_power_level(event.user_id, auth_events) if user_level < send_level: @@ -803,11 +805,34 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b errcode=Codes.INSUFFICIENT_POWER, ) - # Check state_key - if hasattr(event, "state_key"): - if event.state_key.startswith("@"): - if event.state_key != event.user_id: - raise AuthError(403, "You are not allowed to set others state") + if ( + state_key is not None + and state_key.startswith("@") + and state_key != event.user_id + ): + if event.room_version.msc3757_enabled: + try: + colon_idx = state_key.index(":", 1) + suffix_idx = state_key.find("_", colon_idx + 1) + state_key_user_id = ( + state_key[:suffix_idx] 
if suffix_idx != -1 else state_key + ) + if not UserID.is_valid(state_key_user_id): + raise ValueError + except ValueError: + raise SynapseError( + 400, + "State key neither equals a valid user ID, nor starts with one plus an underscore", + errcode=Codes.BAD_JSON, + ) + if ( + # sender is owner of the state key + state_key_user_id == event.user_id + # sender has higher PL than the owner of the state key + or user_level > get_user_power_level(state_key_user_id, auth_events) + ): + return True + raise AuthError(403, "You are not allowed to set others state") return True diff --git a/tests/rest/client/test_owned_state.py b/tests/rest/client/test_owned_state.py new file mode 100644 index 0000000000..5fb5767676 --- /dev/null +++ b/tests/rest/client/test_owned_state.py @@ -0,0 +1,308 @@ +from http import HTTPStatus + +from parameterized import parameterized_class + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import Codes +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + +_STATE_EVENT_TEST_TYPE = "com.example.test" + +# To stress-test parsing, include separator & sigil characters +_STATE_KEY_SUFFIX = "_state_key_suffix:!@#$123" + + +class OwnedStateBase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.creator_user_id = self.register_user("creator", "pass") + self.creator_access_token = self.login("creator", "pass") + self.user1_user_id = self.register_user("user1", "pass") + self.user1_access_token = self.login("user1", "pass") + + self.room_id = self.helper.create_room_as( + self.creator_user_id, + tok=self.creator_access_token, + is_public=True, + extra_content={ + "power_level_content_override": { + "events": { + _STATE_EVENT_TEST_TYPE: 0, + }, + }, + }, + ) + + self.helper.join( + room=self.room_id, user=self.user1_user_id, tok=self.user1_access_token + ) + + +class WithoutOwnedStateTestCase(OwnedStateBase): + def default_config(self) -> JsonDict: + config = super().default_config() + config["default_room_version"] = RoomVersions.V10.identifier + return config + + def test_user_can_set_state_with_own_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.user1_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_cannot_set_state_with_own_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.creator_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_other_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_other_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + 
_STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_nonmember_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@notinroom:hs2", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@oops", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + +@parameterized_class( + ("room_version",), + [(i,) for i, v in KNOWN_ROOM_VERSIONS.items() if v.msc3757_enabled], +) +class MSC3757OwnedStateTestCase(OwnedStateBase): + room_version: str + + def default_config(self) -> JsonDict: + config = super().default_config() + config["default_room_version"] = self.room_version + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.user2_user_id = self.register_user("user2", "pass") + self.user2_access_token = self.login("user2", "pass") + + self.helper.join( + room=self.room_id, user=self.user2_user_id, tok=self.user2_access_token + ) + + def test_user_can_set_state_with_own_suffixed_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_can_set_state_with_other_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_can_set_state_with_other_suffixed_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_user_cannot_set_state_with_other_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user2_user_id}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_other_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user2_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_unseparated_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX[1:]}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_misplaced_userid_in_key(self) -> None: + body = self.helper.send_state( + self.room_id, + 
_STATE_EVENT_TEST_TYPE, + {}, + # Still put @ at start of state key, because without it, there is no write protection at all + state_key=f"@prefix_{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_can_set_state_with_nonmember_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@notinroom:hs2", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@oops", + tok=self.creator_access_token, + expect_code=HTTPStatus.BAD_REQUEST, + ) + + self.assertEqual( + body["errcode"], + Codes.BAD_JSON, + body, + ) + + def test_room_creator_cannot_set_state_with_improperly_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.creator_user_id}@{_STATE_KEY_SUFFIX[1:]}", + tok=self.creator_access_token, + expect_code=HTTPStatus.BAD_REQUEST, + ) + + self.assertEqual( + body["errcode"], + Codes.BAD_JSON, + body, + ) From cfbddc258fd18bb8117bef84d2021c6f1e70795f Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 26 Sep 2024 15:29:13 +0200 Subject: [PATCH 284/332] 1.116.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/17513.feature | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17513.feature diff --git a/CHANGES.md b/CHANGES.md index 5993971791..6e7efd7630 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.116.0rc2 (2024-09-26) + +### Features + +- Add implementation of restricting who can overwrite a state event as proposed by [MSC3757](https://github.com/matrix-org/matrix-spec-proposals/pull/3757). ([\#17513](https://github.com/element-hq/synapse/issues/17513)) + + + + # Synapse 1.116.0rc1 (2024-09-25) ### Features diff --git a/changelog.d/17513.feature b/changelog.d/17513.feature deleted file mode 100644 index 21441c7211..0000000000 --- a/changelog.d/17513.feature +++ /dev/null @@ -1 +0,0 @@ -Add implementation of restricting who can overwrite a state event as proposed by [MSC3757](https://github.com/matrix-org/matrix-spec-proposals/pull/3757). diff --git a/debian/changelog b/debian/changelog index 1fb59ada66..6fd2acbfc0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium + + * New synapse release 1.116.0rc2. + + -- Synapse Packaging team Thu, 26 Sep 2024 13:28:43 +0000 + matrix-synapse-py3 (1.116.0~rc1) stable; urgency=medium * New synapse release 1.116.0rc1. 
    diff --git a/pyproject.toml b/pyproject.toml index 405fd1e1dc..252c00431f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.116.0rc1" +version = "1.116.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From ece66ba61c3e2fe5b968ac564ea4d9019ddc7c70 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 12:58:02 +0100 Subject: [PATCH 285/332] Minor perf speed up for large accounts on SSS (#17751) This works because, instead of passing *all* rooms to `record_sent_rooms`, we only need to pass the rooms that were previously not in the LIVE state. This came from a py-spy profile where we were spending ~10% CPU calling these functions. Note that `record_sent_rooms` is a no-op for rooms that are already in the `LIVE` state, so we only need to call it for `PREVIOUSLY` or `INITIAL` rooms. --- changelog.d/17751.misc | 1 + synapse/handlers/sliding_sync/extensions.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17751.misc diff --git a/changelog.d/17751.misc b/changelog.d/17751.misc new file mode 100644 index 0000000000..4d35327481 --- /dev/null +++ b/changelog.d/17751.misc @@ -0,0 +1 @@ +Minor performance increase for large accounts using sliding sync. diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 56e1d9329e..0c77b52513 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -572,7 +572,8 @@ class SlidingSyncExtensionHandler: # Now record which rooms are now up to data, and which rooms have # pending updates to send. - new_connection_state.account_data.record_sent_rooms(relevant_room_ids) + new_connection_state.account_data.record_sent_rooms(previously_rooms.keys()) + new_connection_state.account_data.record_sent_rooms(initial_rooms) missing_updates = ( all_updates_since_the_from_token.keys() - relevant_room_ids ) @@ -763,9 +764,10 @@ class SlidingSyncExtensionHandler: room_id_to_receipt_map[room_id] = {"type": type, "content": content} - # Now we update the per-connection state to track which receipts we have - # and haven't sent down. - new_connection_state.receipts.record_sent_rooms(relevant_room_ids) + # Update the per-connection state to track which rooms we have sent + # all the receipts for. 
    
    + new_connection_state.receipts.record_sent_rooms(previously_rooms.keys()) + new_connection_state.receipts.record_sent_rooms(initial_rooms) if from_token: # Now find the set of rooms that may have receipts that we're not sending From 93889eb2e707af0a4e0722e614f924a4363be544 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 12:58:13 +0100 Subject: [PATCH 286/332] Optimise notifier (#17765) The notifier is quite inefficient when it has to wake up many user streams all at once. From a silly benchmark, this change takes the time to notify 1M user streams from ~30s to ~5s. --- changelog.d/17765.misc | 1 + synapse/notifier.py | 46 ++++++++++++++++------------------ tests/rest/client/test_sync.py | 31 +++++++++++++++-------- 3 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 changelog.d/17765.misc diff --git a/changelog.d/17765.misc b/changelog.d/17765.misc new file mode 100644 index 0000000000..af4e5c85ea --- /dev/null +++ b/changelog.d/17765.misc @@ -0,0 +1 @@ +Increase performance of the notifier when there are many syncing users. diff --git a/synapse/notifier.py b/synapse/notifier.py index 7a2b54036c..744cbddfa3 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -63,6 +63,7 @@ from synapse.types import ( ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure +from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -120,14 +121,13 @@ class _NotifierUserStream: ): self.user_id = user_id self.rooms = set(rooms) - self.current_token = current_token # The last token for which we should wake up any streams that have a # token that comes before it. This gets updated every time we get poked. # We start it at the current token since if we get any streams # that have a token from before we have no idea whether they should be # woken up or not, so lets just wake them up. - self.last_notified_token = current_token + self.current_token = current_token self.last_notified_ms = time_now_ms self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred( @@ -136,33 +136,19 @@ class _NotifierUserStream: def notify( self, - stream_key: StreamKeyType, - stream_id: Union[int, RoomStreamToken, MultiWriterStreamToken], + current_token: StreamToken, time_now_ms: int, ) -> None: """Notify any listeners for this user of a new event from an event source. Args: - stream_key: The stream the event came from. - stream_id: The new id for the stream the event came from. + current_token: The new current token. time_now_ms: The current time in milliseconds. """ - self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) - self.last_notified_token = self.current_token + self.current_token = current_token self.last_notified_ms = time_now_ms notify_deferred = self.notify_deferred - log_kv( - { - "notify": self.user_id, - "stream": stream_key, - "stream_id": stream_id, - "listeners": self.count_listeners(), - } - ) - - users_woken_by_stream_counter.labels(stream_key).inc() - with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) notify_deferred.callback(self.current_token) @@ -191,7 +177,7 @@ class _NotifierUserStream: """ # Immediately wake up stream if something has already since happened # since their last token. 
    
- if self.last_notified_token != token: + if self.current_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) @@ -342,14 +328,17 @@ class Notifier: # Wake up all related user stream notifiers user_streams = self.room_to_user_streams.get(room_id, set()) time_now_ms = self.clock.time_msec() + current_token = self.event_sources.get_current_token() for user_stream in user_streams: try: - user_stream.notify( - StreamKeyType.UN_PARTIAL_STATED_ROOMS, new_token, time_now_ms - ) + user_stream.notify(current_token, time_now_ms) except Exception: logger.exception("Failed to notify listener") + users_woken_by_stream_counter.labels(StreamKeyType.UN_PARTIAL_STATED_ROOMS).inc( + len(user_streams) + ) + # Poke the replication so that other workers also see the write to # the un-partial-stated rooms stream. self.notify_replication() @@ -519,12 +508,16 @@ class Notifier: rooms = rooms or [] with Measure(self.clock, "on_new_event"): - user_streams = set() + user_streams: Set[_NotifierUserStream] = set() log_kv( { "waking_up_explicit_users": len(users), "waking_up_explicit_rooms": len(rooms), + "users": shortstr(users), + "rooms": shortstr(rooms), + "stream": stream_key, + "stream_id": new_token, } ) @@ -544,12 +537,15 @@ class Notifier: ) time_now_ms = self.clock.time_msec() + current_token = self.event_sources.get_current_token() for user_stream in user_streams: try: - user_stream.notify(stream_key, new_token, time_now_ms) + user_stream.notify(current_token, time_now_ms) except Exception: logger.exception("Failed to notify listener") + users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams)) + self.notify_replication() # Notify appservices. diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 63df31ec75..c52a5b2e79 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -282,22 +282,33 @@ class SyncTypingTests(unittest.HomeserverTestCase): self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] - # This should time out! But it does not, because our stream token is - # ahead, and therefore it's saying the typing (that we've actually - # already seen) is new, since it's got a token above our new, now-reset - # stream token. - channel = self.make_request("GET", sync_url % (access_token, next_batch)) - self.assertEqual(200, channel.code) - next_batch = channel.json_body["next_batch"] - # Clear the typing information, so that it doesn't think everything is - # in the future. + # in the future. This happens automatically when the typing stream + # resets. typing._reset() - # Now it SHOULD fail as it never completes! + # Nothing new, so we time out. with self.assertRaises(TimedOutException): self.make_request("GET", sync_url % (access_token, next_batch)) + # Sync and start typing again. + sync_channel = self.make_request( + "GET", sync_url % (access_token, next_batch), await_result=False + ) + self.assertFalse(sync_channel.is_finished()) + + channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.assertEqual(200, channel.code) + + # Sync should now return. 
+ sync_channel.await_result() + self.assertEqual(200, sync_channel.code) + next_batch = sync_channel.json_body["next_batch"] + class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin): servlets = [ From de955293cf86dd075c6d5080670b144561a751e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 12:59:50 +0100 Subject: [PATCH 287/332] Add fast path for sliding sync streams that only ask for extensions (#17768) Principally useful for EX e2ee sliding sync connections. --- changelog.d/17768.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 changelog.d/17768.misc diff --git a/changelog.d/17768.misc b/changelog.d/17768.misc new file mode 100644 index 0000000000..3b80e72534 --- /dev/null +++ b/changelog.d/17768.misc @@ -0,0 +1 @@ +Improve performance of sliding sync connections that do not ask for any rooms. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 0c9722021a..08e619042b 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -123,6 +123,19 @@ class SlidingSyncInterestedRooms: newly_left_rooms: AbstractSet[str] dm_room_ids: AbstractSet[str] + @staticmethod + def empty() -> "SlidingSyncInterestedRooms": + return SlidingSyncInterestedRooms( + lists={}, + relevant_room_map={}, + relevant_rooms_to_send_map={}, + all_rooms=set(), + room_membership_for_user_map={}, + newly_joined_rooms=set(), + newly_left_rooms=set(), + dm_room_ids=set(), + ) + def filter_membership_for_sync( *, @@ -181,6 +194,14 @@ class SlidingSyncRoomLists: from_token: Optional[StreamToken], ) -> SlidingSyncInterestedRooms: """Fetch the set of rooms that match the request""" + has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 + has_room_subscriptions = ( + sync_config.room_subscriptions is not None + and len(sync_config.room_subscriptions) > 0 + ) + + if not has_lists and not has_room_subscriptions: + return SlidingSyncInterestedRooms.empty() if await self.store.have_finished_sliding_sync_background_jobs(): return await self._compute_interested_rooms_new_tables( From 5210565c127bef662d595157f26a063135e6a6ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 13:00:14 +0100 Subject: [PATCH 288/332] Reduce overhead of sliding sync E2EE loops (#17771) Mainly toning down logging and only calling `get_membership_from_event_ids` if something has changed. --- changelog.d/17771.misc | 1 + synapse/api/auth/msc3861_delegated.py | 2 +- synapse/handlers/device.py | 11 ++++++----- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17771.misc diff --git a/changelog.d/17771.misc b/changelog.d/17771.misc new file mode 100644 index 0000000000..be28223151 --- /dev/null +++ b/changelog.d/17771.misc @@ -0,0 +1 @@ +Reduce overhead of sliding sync E2EE loops. 
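As context for the `device.py` hunk that follows: the saving comes from guarding the batched membership lookup behind an emptiness check, since an idle E2EE sliding sync loop usually has nothing to fetch. A condensed sketch of that pattern, with a hypothetical `store` and illustrative type names standing in for Synapse's real data store:

```python
from typing import Mapping, Optional, Set


async def resolve_memberships(
    store: "DataStore", memberships_to_fetch: Set[str]
) -> Mapping[str, Optional["EventIdMembership"]]:
    # Returning an empty mapping keeps the caller's type stable while
    # skipping the database round-trip entirely in the common idle case.
    if not memberships_to_fetch:
        return {}
    return await store.get_membership_from_event_ids(memberships_to_fetch)
```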
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 6bd845c7e3..53907c01d4 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -338,7 +338,7 @@ class MSC3861DelegatedAuth(BaseAuth): logger.exception("Failed to introspect token") raise SynapseError(503, "Unable to introspect the access token") - logger.info(f"Introspection result: {introspection_result!r}") + logger.debug("Introspection result: %r", introspection_result) # TODO: introspection verification should be more extensive, especially: # - verify the audience diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4f2a9f3a5b..d88660e273 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -48,6 +48,7 @@ from synapse.metrics.background_process_metrics import ( wrap_as_background_process, ) from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo +from synapse.storage.databases.main.roommember import EventIdMembership from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import ( DeviceListUpdates, @@ -222,7 +223,6 @@ class DeviceWorkerHandler: return changed @trace - @measure_func("device.get_user_ids_changed") @cancellable async def get_user_ids_changed( self, user_id: str, from_token: StreamToken @@ -290,9 +290,11 @@ class DeviceWorkerHandler: memberships_to_fetch.add(delta.prev_event_id) # Fetch all the memberships for the membership events - event_id_to_memberships = await self.store.get_membership_from_event_ids( - memberships_to_fetch - ) + event_id_to_memberships: Mapping[str, Optional[EventIdMembership]] = {} + if memberships_to_fetch: + event_id_to_memberships = await self.store.get_membership_from_event_ids( + memberships_to_fetch + ) joined_invited_knocked = ( Membership.JOIN, @@ -349,7 +351,6 @@ class DeviceWorkerHandler: return device_list_updates - @measure_func("_generate_sync_entry_for_device_list") async def generate_sync_entry_for_device_list( self, user_id: str, From 8068f31146a10d0ab1dc1657dafd99dbe5b05450 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:06:43 +0100 Subject: [PATCH 289/332] Bump regex from 1.10.6 to 1.11.0 (#17770) --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6cab1085d..dfc8dc6047 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -456,9 +456,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -467,9 +467,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ryu" From 444b565c76dbf5550a6673e7e8fd5f79a57b949b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:07:57 +0100 Subject: [PATCH 290/332] Bump phonenumbers from 8.13.45 to 8.13.46 (#17773) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0f2ee8cac0..2dc06917cd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1447,13 +1447,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.45" +version = "8.13.46" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.45-py2.py3-none-any.whl", hash = "sha256:bf05ec20fcd13f0d53e43a34ed7bd1c8be26a72b88fce4b8c64fca5b4641987a"}, - {file = "phonenumbers-8.13.45.tar.gz", hash = "sha256:53679a95b6060fd5e15467759252c87933d8566d6a5be00995a579eb0e02435b"}, + {file = "phonenumbers-8.13.46-py2.py3-none-any.whl", hash = "sha256:519422d407af066fdbf98e179ea2e214487060f26526d67871f817eefbbb2134"}, + {file = "phonenumbers-8.13.46.tar.gz", hash = "sha256:94bf18ba9725bb6868d29473b13f78ef01e2585c5cb561ec0200be7676e77452"}, ] [[package]] From 602956ef64b448c1d92ec346860a1d696b9c5955 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:08:56 +0100 Subject: [PATCH 291/332] Bump ruff from 0.6.7 to 0.6.8 (#17774) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2dc06917cd..7ab990e76b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.7" +version = "0.6.8" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.7-py3-none-linux_armv6l.whl", hash = "sha256:08277b217534bfdcc2e1377f7f933e1c7957453e8a79764d004e44c40db923f2"}, - {file = "ruff-0.6.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c6707a32e03b791f4448dc0dce24b636cbcdee4dd5607adc24e5ee73fd86c00a"}, - {file = "ruff-0.6.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:533d66b7774ef224e7cf91506a7dafcc9e8ec7c059263ec46629e54e7b1f90ab"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a86aac6f915932d259f7bec79173e356165518859f94649d8c50b81ff087e9"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3f8822defd260ae2460ea3832b24d37d203c3577f48b055590a426a722d50ef"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ba4efe5c6dbbb58be58dd83feedb83b5e95c00091bf09987b4baf510fee5c99"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:525201b77f94d2b54868f0cbe5edc018e64c22563da6c5c2e5c107a4e85c1c0d"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8854450839f339e1049fdbe15d875384242b8e85d5c6947bb2faad33c651020b"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f0b62056246234d59cbf2ea66e84812dc9ec4540518e37553513392c171cb18"}, - {file = "ruff-0.6.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b1462fa56c832dc0cea5b4041cfc9c97813505d11cce74ebc6d1aae068de36b"}, - {file = "ruff-0.6.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:02b083770e4cdb1495ed313f5694c62808e71764ec6ee5db84eedd82fd32d8f5"}, - {file = "ruff-0.6.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c05fd37013de36dfa883a3854fae57b3113aaa8abf5dea79202675991d48624"}, - {file = "ruff-0.6.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f49c9caa28d9bbfac4a637ae10327b3db00f47d038f3fbb2195c4d682e925b14"}, - {file = "ruff-0.6.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a0e1655868164e114ba43a908fd2d64a271a23660195017c17691fb6355d59bb"}, - {file = "ruff-0.6.7-py3-none-win32.whl", hash = "sha256:a939ca435b49f6966a7dd64b765c9df16f1faed0ca3b6f16acdf7731969deb35"}, - {file = "ruff-0.6.7-py3-none-win_amd64.whl", hash = "sha256:590445eec5653f36248584579c06252ad2e110a5d1f32db5420de35fb0e1c977"}, - {file = "ruff-0.6.7-py3-none-win_arm64.whl", hash = "sha256:b28f0d5e2f771c1fe3c7a45d3f53916fc74a480698c4b5731f0bea61e52137c8"}, - {file = "ruff-0.6.7.tar.gz", hash = "sha256:44e52129d82266fa59b587e2cd74def5637b730a69c4542525dfdecfaae38bd5"}, + {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, + {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, + {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, + {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, + {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, + {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = "sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, + {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, ] [[package]] @@ -3114,4 +3114,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "93c267fac3428b764f954e6faa17937b9c97b1ed2bdafc41dd8f6cb5d2ce085b" +content-hash = "304d03b74d2886def69ae44ce5afaed21318db9f09aae91281e0f182e1660ffd" diff --git a/pyproject.toml b/pyproject.toml index 252c00431f..62d5ef1725 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.6.7" +ruff = "0.6.8" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From ae4862c38f4f8efbe59c1759896fcef1db74ac53 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 13:32:31 +0100 Subject: [PATCH 292/332] Optimise notifier mk2 (#17766) Based on #17765. Basically the idea is to reduce the overhead of calling `ObservableDeferred` in a loop. The two gains are: a) just using a list of deferreds rather than the machinery of `ObservableDeferred`, and b) only calling `PreseverLoggingContext` once. `PreseverLoggingContext` in particular is expensive to call a lot as each time it needs to call `get_thread_resource_usage` twice, so that it an update the CPU metrics of the log context. 
--- changelog.d/17766.misc | 1 + synapse/notifier.py | 96 +++++++++++++++++++++++++++--------------- 2 files changed, 62 insertions(+), 35 deletions(-) create mode 100644 changelog.d/17766.misc diff --git a/changelog.d/17766.misc b/changelog.d/17766.misc new file mode 100644 index 0000000000..af4e5c85ea --- /dev/null +++ b/changelog.d/17766.misc @@ -0,0 +1 @@ +Increase performance of the notifier when there are many syncing users. diff --git a/synapse/notifier.py b/synapse/notifier.py index 744cbddfa3..88f531182a 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -41,6 +41,7 @@ import attr from prometheus_client import Counter from twisted.internet import defer +from twisted.internet.defer import Deferred from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, Membership from synapse.api.errors import AuthError @@ -52,6 +53,7 @@ from synapse.logging.opentracing import log_kv, start_active_span from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( + ISynapseReactor, JsonDict, MultiWriterStreamToken, PersistedEventPosition, @@ -61,7 +63,9 @@ from synapse.types import ( StreamToken, UserID, ) -from synapse.util.async_helpers import ObservableDeferred, timeout_deferred +from synapse.util.async_helpers import ( + timeout_deferred, +) from synapse.util.metrics import Measure from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_client @@ -90,18 +94,6 @@ def count(func: Callable[[T], bool], it: Iterable[T]) -> int: return n -class _NotificationListener: - """This represents a single client connection to the events stream. - The events stream handler will have yielded to the deferred, so to - notify the handler it is sufficient to resolve the deferred. - """ - - __slots__ = ["deferred"] - - def __init__(self, deferred: "defer.Deferred"): - self.deferred = deferred - - class _NotifierUserStream: """This represents a user connected to the event stream. It tracks the most recent stream token for that user. @@ -114,11 +106,13 @@ class _NotifierUserStream: def __init__( self, + reactor: ISynapseReactor, user_id: str, rooms: StrCollection, current_token: StreamToken, time_now_ms: int, ): + self.reactor = reactor self.user_id = user_id self.rooms = set(rooms) @@ -130,28 +124,31 @@ class _NotifierUserStream: self.current_token = current_token self.last_notified_ms = time_now_ms - self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred( - defer.Deferred() - ) + # Set of listeners that we need to wake up when there has been a change. + self.listeners: Set[Deferred[StreamToken]] = set() - def notify( + def update_and_fetch_deferreds( self, current_token: StreamToken, time_now_ms: int, - ) -> None: - """Notify any listeners for this user of a new event from an - event source. + ) -> Collection["Deferred[StreamToken]"]: + """Update the stream for this user because of a new event from an + event source, and return the set of deferreds to wake up. + Args: current_token: The new current token. time_now_ms: The current time in milliseconds. + + Returns: + The set of deferreds that need to be called. 
""" self.current_token = current_token self.last_notified_ms = time_now_ms - notify_deferred = self.notify_deferred - with PreserveLoggingContext(): - self.notify_deferred = ObservableDeferred(defer.Deferred()) - notify_deferred.callback(self.current_token) + listeners = self.listeners + self.listeners = set() + + return listeners def remove(self, notifier: "Notifier") -> None: """Remove this listener from all the indexes in the Notifier @@ -165,9 +162,9 @@ class _NotifierUserStream: notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: - return len(self.notify_deferred.observers()) + return len(self.listeners) - def new_listener(self, token: StreamToken) -> _NotificationListener: + def new_listener(self, token: StreamToken) -> "Deferred[StreamToken]": """Returns a deferred that is resolved when there is a new token greater than the given token. @@ -177,10 +174,17 @@ class _NotifierUserStream: """ # Immediately wake up stream if something has already since happened # since their last token. - if self.current_token != token: - return _NotificationListener(defer.succeed(self.current_token)) - else: - return _NotificationListener(self.notify_deferred.observe()) + if token != self.current_token: + return defer.succeed(self.current_token) + + # Create a new deferred and add it to the set of listeners. We add a + # cancel handler to remove it from the set again, to handle timeouts. + deferred: "Deferred[StreamToken]" = Deferred( + canceller=lambda d: self.listeners.discard(d) + ) + self.listeners.add(deferred) + + return deferred @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -233,6 +237,7 @@ class Notifier: # List of callbacks to be notified when a lock is released self._lock_released_callback: List[Callable[[str, str, str], None]] = [] + self.reactor = hs.get_reactor() self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() @@ -329,12 +334,20 @@ class Notifier: user_streams = self.room_to_user_streams.get(room_id, set()) time_now_ms = self.clock.time_msec() current_token = self.event_sources.get_current_token() + + listeners: List["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: - user_stream.notify(current_token, time_now_ms) + listeners.extend( + user_stream.update_and_fetch_deferreds(current_token, time_now_ms) + ) except Exception: logger.exception("Failed to notify listener") + with PreserveLoggingContext(): + for listener in listeners: + listener.callback(current_token) + users_woken_by_stream_counter.labels(StreamKeyType.UN_PARTIAL_STATED_ROOMS).inc( len(user_streams) ) @@ -538,12 +551,24 @@ class Notifier: time_now_ms = self.clock.time_msec() current_token = self.event_sources.get_current_token() + listeners: List["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: - user_stream.notify(current_token, time_now_ms) + listeners.extend( + user_stream.update_and_fetch_deferreds( + current_token, time_now_ms + ) + ) except Exception: logger.exception("Failed to notify listener") + # We resolve all these deferreds in one go so that we only need to + # call `PreserveLoggingContext` once, as it has a bunch of overhead + # (to calculate performance stats) + with PreserveLoggingContext(): + for listener in listeners: + listener.callback(current_token) + users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams)) self.notify_replication() @@ -582,6 +607,7 @@ class Notifier: if room_ids is None: room_ids = await 
self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( + reactor=self.reactor, user_id=user_id, rooms=room_ids, current_token=current_token, @@ -604,8 +630,8 @@ class Notifier: # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) - listener.deferred = timeout_deferred( - listener.deferred, + listener = timeout_deferred( + listener, (end_time - now) / 1000.0, self.hs.get_reactor(), ) @@ -618,7 +644,7 @@ class Notifier: ) with PreserveLoggingContext(): - await listener.deferred + await listener log_kv( { From 81e0f57800590e63bbc07319db6da964595978d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Sep 2024 13:52:33 +0100 Subject: [PATCH 293/332] Fix perf when streams don't change often (#17767) There is a bug with the `StreamChangeCache` where it would incorrectly return that all entities had changed if asked for entities changed *since* the earliest stream position. Note that for streams we use the inequalities: `$min_stream_id < stream_id <= $max_stream_id`, i.e. when we ask the stream change cache for all things that have changed since `$stream_id` we don't care for events that happened *at* `$stream_id`. Specifically: `_earliest_known_stream_pos` is the position at which we know that we'll have entries for all changes since that point, we can use the cache for any stream IDs that equal `_earliest_known_stream_pos`. `_earliest_known_stream_pos` is set in three places: - On startup we set it either to: - the current maximum stream ID, with not prefilled values; or - the minimum of the latest N values we pulled from the DB - When we evict items from the bottom, we set it to the stream ID of the evicted items. This was changed in https://github.com/matrix-org/synapse/pull/14435, but I think we were overly conservative there. --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17767.misc | 1 + synapse/util/caches/stream_change_cache.py | 14 +++++++------- tests/util/test_stream_change_cache.py | 18 +++++++++++------- 3 files changed, 19 insertions(+), 14 deletions(-) create mode 100644 changelog.d/17767.misc diff --git a/changelog.d/17767.misc b/changelog.d/17767.misc new file mode 100644 index 0000000000..36f23d0f60 --- /dev/null +++ b/changelog.d/17767.misc @@ -0,0 +1 @@ +Fix performance of streams that don't change often. diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 16fcb00206..03503abe0f 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -142,9 +142,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return that the entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return True @@ -186,7 +186,7 @@ class StreamChangeCache: This will be all entities if the given stream position is at or earlier than the earliest known stream position. 
""" - if not self._cache or stream_pos <= self._earliest_known_stream_pos: + if not self._cache or stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return set(entities) @@ -238,9 +238,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return that an entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return True @@ -270,9 +270,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return None to mark that it is unknown if an entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: return AllEntitiesChangedResult(None) changed_entities: List[EntityType] = [] diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index af1199ef8a..c41f5706af 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -53,8 +53,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): # return True, whether it's a known entity or not. self.assertTrue(cache.has_entity_changed("user@foo.com", 0)) self.assertTrue(cache.has_entity_changed("not@here.website", 0)) - self.assertTrue(cache.has_entity_changed("user@foo.com", 3)) - self.assertTrue(cache.has_entity_changed("not@here.website", 3)) + self.assertTrue(cache.has_entity_changed("user@foo.com", 2)) + self.assertTrue(cache.has_entity_changed("not@here.website", 2)) def test_entity_has_changed_pops_off_start(self) -> None: """ @@ -76,9 +76,11 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): self.assertTrue("user@foo.com" not in cache._entity_to_key) self.assertEqual( - cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"] + cache.get_all_entities_changed(2).entities, + ["bar@baz.net", "user@elsewhere.org"], ) - self.assertFalse(cache.get_all_entities_changed(2).hit) + self.assertFalse(cache.get_all_entities_changed(1).hit) + self.assertTrue(cache.get_all_entities_changed(2).hit) # If we update an existing entity, it keeps the two existing entities cache.entity_has_changed("bar@baz.net", 5) @@ -89,7 +91,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): cache.get_all_entities_changed(3).entities, ["user@elsewhere.org", "bar@baz.net"], ) - self.assertFalse(cache.get_all_entities_changed(2).hit) + self.assertFalse(cache.get_all_entities_changed(1).hit) + self.assertTrue(cache.get_all_entities_changed(2).hit) def test_get_all_entities_changed(self) -> None: """ @@ -114,7 +117,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): self.assertEqual( cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"] ) - self.assertFalse(cache.get_all_entities_changed(1).hit) + self.assertFalse(cache.get_all_entities_changed(0).hit) + self.assertTrue(cache.get_all_entities_changed(1).hit) # ... later, things gest more updates cache.entity_has_changed("user@foo.com", 5) @@ -149,7 +153,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): # With no entities, it returns True for the past, present, and False for # the future. 
self.assertTrue(cache.has_any_entity_changed(0)) - self.assertTrue(cache.has_any_entity_changed(1)) + self.assertFalse(cache.has_any_entity_changed(1)) self.assertFalse(cache.has_any_entity_changed(2)) # We add an entity From 55c0391cc821170706d656bd8a2252627865bedc Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 1 Oct 2024 11:14:13 +0100 Subject: [PATCH 294/332] 1.116.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6e7efd7630..ae69046a1a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.116.0 (2024-10-01) + +No significant changes since 1.116.0rc2. + + + + # Synapse 1.116.0rc2 (2024-09-26) ### Features diff --git a/debian/changelog b/debian/changelog index 6fd2acbfc0..c37536d7e8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.116.0) stable; urgency=medium + + * New Synapse release 1.116.0. + + -- Synapse Packaging team Tue, 01 Oct 2024 11:14:07 +0100 + matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium * New synapse release 1.116.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 252c00431f..d8994a25c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.116.0rc2" +version = "1.116.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 8bbe66a9b9220a6d2bd692744b176290b4143da6 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 4 Oct 2024 00:01:33 +0200 Subject: [PATCH 295/332] explain load balancing for `federation_sender_instances` (#17776) Adding information on how the load is distributed for `federation_sender_instances`. Thx to @devonh for the information. causal source: https://github.com/element-hq/synapse/blob/c2e5e9e67c24264f5a12bf3ceaa9c4e195547d26/synapse/config/_base.py#L946-L989 ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Devon Hudson --- changelog.d/17776.doc | 1 + docs/usage/configuration/config_documentation.md | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17776.doc diff --git a/changelog.d/17776.doc b/changelog.d/17776.doc new file mode 100644 index 0000000000..86754a3464 --- /dev/null +++ b/changelog.d/17776.doc @@ -0,0 +1 @@ +Explain how load balancing works for `federation_sender_instances`. 
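To illustrate what "assigned ... based on the hash of the destination server name" means in practice, the assignment is deterministic and stateless, roughly as below. This is a simplified sketch of the sharding logic the message above points to in `synapse/config/_base.py`, not a verbatim copy:

```python
from hashlib import sha256
from typing import List


def instance_for_destination(destination: str, instances: List[str]) -> str:
    # Reduce a hash of the destination modulo the number of configured
    # federation sender instances, so a given remote server is always
    # handled by the same worker (until the instance list changes).
    dest_hash = sha256(destination.encode("utf8")).digest()
    dest_int = int.from_bytes(dest_hash, byteorder="little")
    return instances[dest_int % len(instances)]
```

One consequence worth noting: `instance_for_destination("example.com", senders)` always picks the same sender, so a single very busy destination cannot be spread across multiple workers.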
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index f51924f064..08eedc03b7 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4368,7 +4368,13 @@ It is possible to scale the processes that handle sending outbound federation re by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to a `federation_sender_instances` map. Doing so will remove handling of this function from the main process. Multiple workers can be added to this map, in which case the work is -balanced across them. +balanced across them. + +The way that the load balancing works is any outbound federation request will be assigned +to a federation sender worker based on the hash of the destination server name. This +means that all requests being sent to the same destination will be processed by the same +worker instance. Multiple `federation_sender_instances` are useful if there is a federation +with multiple servers. This configuration setting must be shared between all workers handling federation sending, and if changed all federation sender workers must be stopped at the same time From 316d635906773de8fbd53673d99d68dbb32c43d5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:53:35 +0100 Subject: [PATCH 296/332] Fix NAME attribute of `ReplicationRemovePusherRestServlet` (#17779) --- changelog.d/17779.bugfix | 1 + synapse/replication/http/push.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17779.bugfix diff --git a/changelog.d/17779.bugfix b/changelog.d/17779.bugfix new file mode 100644 index 0000000000..72785830d9 --- /dev/null +++ b/changelog.d/17779.bugfix @@ -0,0 +1 @@ +Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. \ No newline at end of file diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index 2e06c43ce5..48e254cdb1 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -48,7 +48,7 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint): """ - NAME = "add_user_account_data" + NAME = "remove_pusher" PATH_ARGS = ("user_id",) CACHE = False From 9920417723ce50c85ff27ba5b7e1251f83923e43 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Fri, 4 Oct 2024 08:42:34 -0400 Subject: [PATCH 297/332] Don't say MSC4140 is supported when it's disabled (#17780) --- changelog.d/17780.bugfix | 1 + synapse/rest/client/versions.py | 2 +- tests/rest/client/test_delayed_events.py | 18 +++++++++++++++++- 3 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17780.bugfix diff --git a/changelog.d/17780.bugfix b/changelog.d/17780.bugfix new file mode 100644 index 0000000000..9d918ae745 --- /dev/null +++ b/changelog.d/17780.bugfix @@ -0,0 +1 @@ +In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for MSC4140 to `false` when server configuration disables support for delayed events. 
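For context, a client consuming this flag would gate its use of delayed events on the advertised value. A hypothetical client-side check (illustrative names; any HTTP client would do) might look like:

```python
from typing import Any, Dict

import requests


def supports_delayed_events(homeserver_url: str) -> bool:
    # /_matrix/client/versions needs no authentication, so this can run
    # before login; a missing key is treated the same as False.
    resp = requests.get(f"{homeserver_url}/_matrix/client/versions", timeout=10)
    resp.raise_for_status()
    body: Dict[str, Any] = resp.json()
    return bool(body.get("unstable_features", {}).get("org.matrix.msc4140"))
```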
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 1cfac87606..8028cf8ad2 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -172,7 +172,7 @@ class VersionsRestServlet(RestServlet): ) ), # MSC4140: Delayed events - "org.matrix.msc4140": True, + "org.matrix.msc4140": bool(self.config.server.max_event_delay_ms), # MSC4151: Report room API (Client-Server API) "org.matrix.msc4151": self.config.experimental.msc4151_enabled, # Simplified sliding sync diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py index 34d9fe7958..cb77c73da2 100644 --- a/tests/rest/client/test_delayed_events.py +++ b/tests/rest/client/test_delayed_events.py @@ -8,11 +8,12 @@ from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes -from synapse.rest.client import delayed_events, room +from synapse.rest.client import delayed_events, room, versions from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock +from tests import unittest from tests.unittest import HomeserverTestCase PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events" @@ -21,6 +22,21 @@ _HS_NAME = "red" _EVENT_TYPE = "com.example.test" +class DelayedEventsUnstableSupportTestCase(HomeserverTestCase): + servlets = [versions.register_servlets] + + def test_false_by_default(self) -> None: + channel = self.make_request("GET", "/_matrix/client/versions") + self.assertEqual(channel.code, 200, channel.result) + self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc4140"]) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_true_if_enabled(self) -> None: + channel = self.make_request("GET", "/_matrix/client/versions") + self.assertEqual(channel.code, 200, channel.result) + self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc4140"]) + + class DelayedEventsTestCase(HomeserverTestCase): """Tests getting and managing delayed events.""" From d34f827ed888beb4f0c415605281b8b1da1bb776 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:14:30 +0100 Subject: [PATCH 298/332] Bump python-multipart from 0.0.10 to 0.0.12 (#17772) --- poetry.lock | 6 +++--- synapse/http/client.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7ab990e76b..bf30fbbe15 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1974,13 +1974,13 @@ six = ">=1.5" [[package]] name = "python-multipart" -version = "0.0.10" +version = "0.0.12" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.10-py3-none-any.whl", hash = "sha256:2b06ad9e8d50c7a8db80e3b56dab590137b323410605af2be20d62a5f1ba1dc8"}, - {file = "python_multipart-0.0.10.tar.gz", hash = "sha256:46eb3c6ce6fdda5fb1a03c7e11d490e407c6930a2703fe7aef4da71c374688fa"}, + {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"}, + {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"}, ] [[package]] diff --git a/synapse/http/client.py b/synapse/http/client.py index 143fee9796..c3b2299c95 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1039,7 +1039,7 @@ class 
_MultipartParserProtocol(protocol.Protocol): self.deferred = deferred self.boundary = boundary self.max_length = max_length - self.parser = None + self.parser: Optional[multipart.MultipartParser] = None self.multipart_response = MultipartResponse() self.has_redirect = False self.in_json = False @@ -1097,7 +1097,7 @@ class _MultipartParserProtocol(protocol.Protocol): self.deferred.errback() self.file_length += end - start - callbacks = { + callbacks: "multipart.multipart.MultipartCallbacks" = { "on_header_field": on_header_field, "on_header_value": on_header_value, "on_part_data": on_part_data, @@ -1113,7 +1113,7 @@ class _MultipartParserProtocol(protocol.Protocol): self.transport.abortConnection() try: - self.parser.write(incoming_data) # type: ignore[attr-defined] + self.parser.write(incoming_data) except Exception as e: logger.warning(f"Exception writing to multipart parser: {e}") self.deferred.errback() From beb7a951f4b5dffda74428d039be42fe3e37334b Mon Sep 17 00:00:00 2001 From: Henrique Date: Mon, 7 Oct 2024 05:37:39 -0300 Subject: [PATCH 299/332] docs: add note about PYTHONMALLOC for accurate jemalloc memory tracking (#17709) Added a note in the documentation suggesting that users may set `PYTHONMALLOC=malloc` when using `jemalloc`. This allows jemalloc to track memory usage more accurately by bypassing Python's internal small-object allocator (`pymalloc`), helping to ensure that `cache_autotuning` functions as expected. This doc change aims to provide more clarity for users configuring jemalloc with Synapse. Based on: https://github.com/element-hq/synapse/blob/4ac783549c5bac7a490a715d359f330bb0b1a161/synapse/metrics/jemalloc.py#L198-L201 --- changelog.d/17709.doc | 1 + docs/usage/administration/admin_faq.md | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/17709.doc diff --git a/changelog.d/17709.doc b/changelog.d/17709.doc new file mode 100644 index 0000000000..8fffc1ca0c --- /dev/null +++ b/changelog.d/17709.doc @@ -0,0 +1 @@ +Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg. diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index a1184d0375..0dce3d3e37 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -255,6 +255,8 @@ line to `/etc/default/matrix-synapse`: LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 +*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process. + This made a significant difference on Python 2.7 - it's unclear how much of an improvement it provides on Python 3.x. From e8e0f0fad71ef253e08c9f7dab1b2837d5f2334d Mon Sep 17 00:00:00 2001 From: V02460 Date: Mon, 7 Oct 2024 10:46:51 +0200 Subject: [PATCH 300/332] Add config option redis.password_path (#17717) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the option to load the Redis password from a file, instead of giving it in the config directly. 
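The mechanics are intentionally simple: read the file once at startup and strip trailing whitespace, along these lines (a standalone sketch, not the `read_file` helper the diff below actually calls):

```python
def load_secret(path: str) -> str:
    # Secret files conventionally end with a newline; strip it so the
    # loaded password matches what the operator wrote.
    with open(path, encoding="utf-8") as f:
        return f.read().strip()
```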
The code is similar to how it’s done for `registration_shared_secret_path`. I changed the example in the documentation to represent the best practice regarding the handling of secrets. Reading secrets from files has the security advantage of separating the secrets from the config. It also simplifies secrets management in Kubernetes. --- changelog.d/17717.feature | 1 + .../configuration/config_documentation.md | 8 ++- synapse/config/redis.py | 18 +++++- tests/config/test_load.py | 56 +++++++++++++++++++ 4 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17717.feature diff --git a/changelog.d/17717.feature b/changelog.d/17717.feature new file mode 100644 index 0000000000..292c99ccc5 --- /dev/null +++ b/changelog.d/17717.feature @@ -0,0 +1 @@ +Add config option `redis.password_path`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 08eedc03b7..29f3528c7e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4524,6 +4524,9 @@ This setting has the following sub-options: * `path`: The full path to a local Unix socket file. **If this is used, `host` and `port` are ignored.** Defaults to `/tmp/redis.sock' * `password`: Optional password if configured on the Redis instance. +* `password_path`: Alternative to `password`, reading the password from an + external file. The file should be a plain text file, containing only the + password. Synapse reads the password from the given file once at startup. * `dbid`: Optional redis dbid if needs to connect to specific redis logical db. * `use_tls`: Whether to use tls connection. Defaults to false. * `certificate_file`: Optional path to the certificate file @@ -4537,13 +4540,16 @@ This setting has the following sub-options: _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_ + _Changed in Synapse 1.116.0: Added password\_path_ + Example configuration: ```yaml redis: enabled: true host: localhost port: 6379 - password: + password_path: + # OR password: dbid: #use_tls: True #certificate_file: diff --git a/synapse/config/redis.py b/synapse/config/redis.py index f140538088..3f38fa11b0 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -21,10 +21,15 @@ from typing import Any -from synapse.config._base import Config +from synapse.config._base import Config, ConfigError, read_file from synapse.types import JsonDict from synapse.util.check_dependencies import check_requirements +CONFLICTING_PASSWORD_OPTS_ERROR = """\ +You have configured both `redis.password` and `redis.password_path`. +These are mutually incompatible. 
+""" + class RedisConfig(Config): section = "redis" @@ -43,6 +48,17 @@ class RedisConfig(Config): self.redis_path = redis_config.get("path", None) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") + redis_password_path = redis_config.get("password_path") + if redis_password_path: + if self.redis_password: + raise ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR) + self.redis_password = read_file( + redis_password_path, + ( + "redis", + "password_path", + ), + ).strip() self.redis_use_tls = redis_config.get("use_tls", False) self.redis_certificate = redis_config.get("certificate_file", None) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 479d2aab91..c5dee06af5 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -19,13 +19,23 @@ # [This file includes modifications made by New Vector Limited] # # +import tempfile +from typing import Callable + import yaml +from parameterized import parameterized from synapse.config import ConfigError +from synapse.config._base import RootConfig from synapse.config.homeserver import HomeServerConfig from tests.config.utils import ConfigFileTestCase +try: + import hiredis +except ImportError: + hiredis = None # type: ignore + class ConfigLoadingFileTestCase(ConfigFileTestCase): def test_load_fails_if_server_name_missing(self) -> None: @@ -116,3 +126,49 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase): self.add_lines_to_config(["trust_identity_server_for_password_resets: true"]) with self.assertRaises(ConfigError): HomeServerConfig.load_config("", ["-c", self.config_file]) + + @parameterized.expand( + [ + "turn_shared_secret_path: /does/not/exist", + "registration_shared_secret_path: /does/not/exist", + *["redis:\n enabled: true\n password_path: /does/not/exist"] + * (hiredis is not None), + ] + ) + def test_secret_files_missing(self, config_str: str) -> None: + self.generate_config() + self.add_lines_to_config(["", config_str]) + + with self.assertRaises(ConfigError): + HomeServerConfig.load_config("", ["-c", self.config_file]) + + @parameterized.expand( + [ + ( + "turn_shared_secret_path: {}", + lambda c: c.voip.turn_shared_secret, + ), + ( + "registration_shared_secret_path: {}", + lambda c: c.registration.registration_shared_secret, + ), + *[ + ( + "redis:\n enabled: true\n password_path: {}", + lambda c: c.redis.redis_password, + ) + ] + * (hiredis is not None), + ] + ) + def test_secret_files_existing( + self, config_line: str, get_secret: Callable[[RootConfig], str] + ) -> None: + self.generate_config_and_remove_lines_containing("registration_shared_secret") + with tempfile.NamedTemporaryFile(buffering=0) as secret_file: + secret_file.write(b"53C237") + + self.add_lines_to_config(["", config_line.format(secret_file.name)]) + config = HomeServerConfig.load_config("", ["-c", self.config_file]) + + self.assertEqual(get_secret(config), "53C237") From e8c8924b810fa7fa7a4bef135ce8ed6b9b6256cc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:34:32 +0100 Subject: [PATCH 301/332] Clarify `test_forget_when_not_left` docstring (#17628) --- changelog.d/17628.doc | 1 + tests/handlers/test_room_member.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17628.doc diff --git a/changelog.d/17628.doc b/changelog.d/17628.doc new file mode 100644 index 0000000000..8b9f436e5f --- /dev/null +++ b/changelog.d/17628.doc @@ -0,0 +1 @@ +Clarify the docstring of 
`test_forget_when_not_left`. \ No newline at end of file diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index acb403cb2f..ad77356ede 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -380,7 +380,7 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): ) def test_forget_when_not_left(self) -> None: - """Tests that a user cannot not forgets a room that has not left.""" + """Tests that a user cannot forget a room that they are still in.""" self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError) def test_nonlocal_room_user_action(self) -> None: From e2610de2084f44340bcf0229e2dcbf0a915cbfb1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2024 10:35:15 +0100 Subject: [PATCH 302/332] Speed up sliding sync when there are many active subscriptions (#17789) Two changes: a) use a batch lookup function instead of a loop, b) check existing data to see if we already have what we need and only fetch what we don't. --- changelog.d/17789.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 18 ++++--- synapse/storage/databases/main/roommember.py | 51 ++++++++++++++++++++ 3 files changed, 63 insertions(+), 7 deletions(-) create mode 100644 changelog.d/17789.misc diff --git a/changelog.d/17789.misc b/changelog.d/17789.misc new file mode 100644 index 0000000000..43ed360ce8 --- /dev/null +++ b/changelog.d/17789.misc @@ -0,0 +1 @@ +Speed up sliding sync when there are many active subscriptions. diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 08e619042b..a1730b7e05 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -500,6 +500,16 @@ class SlidingSyncRoomLists: # depending on the `required_state` requested (see below). partial_state_rooms = await self.store.get_partial_rooms() + # Fetch any rooms that we have not already fetched from the database. + subscription_sliding_sync_rooms = ( + await self.store.get_sliding_sync_room_for_user_batch( + user_id, + sync_config.room_subscriptions.keys() + - room_membership_for_user_map.keys(), + ) + ) + room_membership_for_user_map.update(subscription_sliding_sync_rooms) + for ( room_id, room_subscription, @@ -507,17 +517,11 @@ class SlidingSyncRoomLists: # Check if we have a membership for the room, but didn't pull it out # above. This could be e.g. a leave that we don't pull out by # default. - current_room_entry = ( - await self.store.get_sliding_sync_room_for_user( - user_id, room_id - ) - ) + current_room_entry = room_membership_for_user_map.get(room_id) if not current_room_entry: # TODO: Handle rooms the user isn't in. continue - room_membership_for_user_map[room_id] = current_room_entry - all_rooms.add(room_id) # Take the superset of the `RoomSyncConfig` for each room. 
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 0a62613d34..6f15e51339 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1499,6 +1499,57 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): "get_sliding_sync_room_for_user", get_sliding_sync_room_for_user_txn ) + async def get_sliding_sync_room_for_user_batch( + self, user_id: str, room_ids: StrCollection + ) -> Dict[str, RoomsForUserSlidingSync]: + """Get the sliding sync room entry for the given user and rooms.""" + + if not room_ids: + return {} + + def get_sliding_sync_room_for_user_batch_txn( + txn: LoggingTransaction, + ) -> Dict[str, RoomsForUserSlidingSync]: + clause, args = make_in_list_sql_clause( + self.database_engine, "m.room_id", room_ids + ) + sql = f""" + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + m.has_known_state, + COALESCE(j.room_type, m.room_type), + COALESCE(j.is_encrypted, m.is_encrypted) + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') + WHERE m.forgotten = 0 + AND {clause} + AND user_id = ? + """ + args.append(user_id) + txn.execute(sql, args) + + return { + row[0]: RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=row[9], + ) + for row in txn + } + + return await self.db_pool.runInteraction( + "get_sliding_sync_room_for_user_batch", + get_sliding_sync_room_for_user_batch_txn, + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( From 4e90221d87ab9b5523eaecede4325e2e334bb622 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2024 11:06:31 +0100 Subject: [PATCH 303/332] Sliding sync minor performance speed up using new table (#17787) Use the new tables to work out which rooms have changed. --------- Co-authored-by: Eric Eastwood --- changelog.d/17787.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 27 ++++++++++----- synapse/storage/databases/main/stream.py | 42 +++++++++++++++++++++++ 3 files changed, 61 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17787.misc diff --git a/changelog.d/17787.misc b/changelog.d/17787.misc new file mode 100644 index 0000000000..41ac59b348 --- /dev/null +++ b/changelog.d/17787.misc @@ -0,0 +1 @@ +Sliding sync minor performance speed up using new table. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 9fcc68ff25..cb6a0b9f35 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -49,6 +49,7 @@ from synapse.types import ( Requester, SlidingSyncStreamToken, StateMap, + StrCollection, StreamKeyType, StreamToken, ) @@ -293,7 +294,6 @@ class SlidingSyncHandler: # to record rooms as having updates even if there might not actually # be anything new for the user (e.g. due to event filters, events # having happened after the user left, etc). - unsent_room_ids = [] if from_token: # The set of rooms that the client (may) care about, but aren't # in any list range (or subscribed to). @@ -305,15 +305,24 @@ class SlidingSyncHandler: # TODO: Replace this with something faster. 
When we land the # sliding sync tables that record the most recent event # positions we can use that. - missing_event_map_by_room = ( - await self.store.get_room_events_stream_for_rooms( - room_ids=missing_rooms, - from_key=to_token.room_key, - to_key=from_token.stream_token.room_key, - limit=1, + unsent_room_ids: StrCollection + if await self.store.have_finished_sliding_sync_background_jobs(): + unsent_room_ids = await ( + self.store.get_rooms_that_have_updates_since_sliding_sync_table( + room_ids=missing_rooms, + from_key=from_token.stream_token.room_key, + ) ) - ) - unsent_room_ids = list(missing_event_map_by_room) + else: + missing_event_map_by_room = ( + await self.store.get_room_events_stream_for_rooms( + room_ids=missing_rooms, + from_key=to_token.room_key, + to_key=from_token.stream_token.room_key, + limit=1, + ) + ) + unsent_room_ids = list(missing_event_map_by_room) new_connection_state.rooms.record_unsent_rooms( unsent_room_ids, from_token.stream_token.room_key diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 964f41ca57..b4258a4436 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -751,6 +751,48 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if self._events_stream_cache.has_entity_changed(room_id, from_id) } + async def get_rooms_that_have_updates_since_sliding_sync_table( + self, + room_ids: StrCollection, + from_key: RoomStreamToken, + ) -> StrCollection: + """Return the rooms that probably have had updates since the given + token (changes that are > `from_key`).""" + # If the stream change cache is valid for the stream token, we can just + # use the result of that. + if from_key.stream >= self._events_stream_cache.get_earliest_known_position(): + return self._events_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) + + def get_rooms_that_have_updates_since_sliding_sync_table_txn( + txn: LoggingTransaction, + ) -> StrCollection: + sql = """ + SELECT room_id + FROM sliding_sync_joined_rooms + WHERE {clause} + AND event_stream_ordering > ? + """ + + results: Set[str] = set() + for batch in batch_iter(room_ids, 1000): + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch + ) + + args.append(from_key.stream) + txn.execute(sql.format(clause=clause), args) + + results.update(row[0] for row in txn) + + return results + + return await self.db_pool.runInteraction( + "get_rooms_that_have_updates_since_sliding_sync_table", + get_rooms_that_have_updates_since_sliding_sync_table_txn, + ) + async def paginate_room_events_by_stream_ordering( self, *, From 422f3ecec1ffb4fa352f83d7ec8b3327f36a93c8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2024 11:17:23 +0100 Subject: [PATCH 304/332] Sliding sync: omit bump stamp when it is unchanged (#17788) This saves some DB lookups in rooms --- changelog.d/17788.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 54 +++++++++--- synapse/rest/client/sync.py | 4 +- synapse/types/handlers/sliding_sync.py | 3 +- .../client/sliding_sync/test_rooms_meta.py | 86 +++++++++++++++++++ 5 files changed, 136 insertions(+), 12 deletions(-) create mode 100644 changelog.d/17788.misc diff --git a/changelog.d/17788.misc b/changelog.d/17788.misc new file mode 100644 index 0000000000..1ef6f6e2ba --- /dev/null +++ b/changelog.d/17788.misc @@ -0,0 +1 @@ +Sliding sync minor performance improvement by omitting unchanged data from incremental responses. 
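The `synapse/rest/client/sync.py` hunk later in this patch is the client-visible half of the change: `bump_stamp` becomes `Optional[int]`, and `None` means "unchanged since the client's token, send nothing". A minimal sketch of that omission pattern, with field names taken from the diff:

```python
from typing import Optional

def serialize_room(bump_stamp: Optional[int], notification_count: int) -> dict:
    """Serialize one room for an incremental sliding sync response."""
    serialized = {"notification_count": notification_count}
    # None signals "unchanged": omit the field entirely rather than
    # recomputing and resending a redundant value on every sync.
    if bump_stamp is not None:
        serialized["bump_stamp"] = bump_stamp
    return serialized

print(serialize_room(None, 2))  # {'notification_count': 2}
print(serialize_room(42, 2))    # {'notification_count': 2, 'bump_stamp': 42}
```
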
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index cb6a0b9f35..8c12cea8eb 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1057,22 +1057,42 @@ class SlidingSyncHandler: ) ) - # Figure out the last bump event in the room - # - # By default, just choose the membership event position for any non-join membership - bump_stamp = room_membership_for_user_at_to_token.event_pos.stream + # Figure out the last bump event in the room. If the bump stamp hasn't + # changed we omit it from the response. + bump_stamp = None + + always_return_bump_stamp = ( + # We use the membership event position for any non-join + room_membership_for_user_at_to_token.membership != Membership.JOIN + # We didn't fetch any timeline events but we should still check for + # a bump_stamp that might be somewhere + or limited is None + # There might be a bump event somewhere before the timeline events + # that we fetched, that we didn't previously send down + or limited is True + # Always give the client some frame of reference if this is the + # first time they are seeing the room down the connection + or initial + ) + # If we're joined to the room, we need to find the last bump event before the # `to_token` if room_membership_for_user_at_to_token.membership == Membership.JOIN: - # Try and get a bump stamp, if not we just fall back to the - # membership token. + # Try and get a bump stamp new_bump_stamp = await self._get_bump_stamp( - room_id, to_token, timeline_events + room_id, + to_token, + timeline_events, + check_outside_timeline=always_return_bump_stamp, ) if new_bump_stamp is not None: bump_stamp = new_bump_stamp - if bump_stamp < 0: + if bump_stamp is None and always_return_bump_stamp: + # By default, just choose the membership event position for any non-join membership + bump_stamp = room_membership_for_user_at_to_token.event_pos.stream + + if bump_stamp is not None and bump_stamp < 0: # We never want to send down negative stream orderings, as you can't # sensibly compare positive and negative stream orderings (they have # different meanings). @@ -1165,14 +1185,23 @@ class SlidingSyncHandler: @trace async def _get_bump_stamp( - self, room_id: str, to_token: StreamToken, timeline: List[EventBase] + self, + room_id: str, + to_token: StreamToken, + timeline: List[EventBase], + check_outside_timeline: bool, ) -> Optional[int]: - """Get a bump stamp for the room, if we have a bump event + """Get a bump stamp for the room, if we have a bump event and it has + changed. Args: room_id to_token: The upper bound of token to return timeline: The list of events we have fetched. + limited: If the timeline was limited. + check_outside_timeline: Whether we need to check for bump stamp for + events before the timeline if we didn't find a bump stamp in + the timeline events. """ # First check the timeline events we're returning to see if one of @@ -1192,6 +1221,11 @@ class SlidingSyncHandler: if new_bump_stamp > 0: return new_bump_stamp + if not check_outside_timeline: + # If we are not a limited sync, then we know the bump stamp can't + # have changed. + return None + # We can quickly query for the latest bump event in the room using the # sliding sync tables. 
latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 364cf40153..122708e933 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1010,11 +1010,13 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms: Dict[str, JsonDict] = {} for room_id, room_result in rooms.items(): serialized_rooms[room_id] = { - "bump_stamp": room_result.bump_stamp, "notification_count": room_result.notification_count, "highlight_count": room_result.highlight_count, } + if room_result.bump_stamp is not None: + serialized_rooms[room_id]["bump_stamp"] = room_result.bump_stamp + if room_result.joined_count is not None: serialized_rooms[room_id]["joined_count"] = room_result.joined_count diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 5dd2c9d411..aae60fddea 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -158,6 +158,7 @@ class SlidingSyncResult: name changes to mark the room as unread and bump it to the top. For encrypted rooms, we just have to consider any activity as a bump because we can't see the content and the client has to figure it out for themselves. + This may not be included if there hasn't been a change. joined_count: The number of users with membership of join, including the client's own user ID. (same as sync `v2 m.joined_member_count`) invited_count: The number of users with membership of invite. (same as sync v2 @@ -193,7 +194,7 @@ class SlidingSyncResult: limited: Optional[bool] # Only optional because it won't be included for invite/knock rooms with `stripped_state` num_live: Optional[int] - bump_stamp: int + bump_stamp: Optional[int] joined_count: Optional[int] invited_count: Optional[int] notification_count: int diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py index c619dd83fb..0a8b2c02c2 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py @@ -1096,6 +1096,92 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) + def test_rooms_bump_stamp_no_change_incremental(self) -> None: + """Test that the bump stamp is omitted if there has been no change""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 100, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Initial sync so we expect to see a bump stamp + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + + # Send an event that is not in the bump events list + self.helper.send_event( + room_id1, type="org.matrix.test", content={}, tok=user1_tok + ) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # There hasn't been a change to the bump stamps, so we ignore it + self.assertNotIn("bump_stamp", response_body["rooms"][room_id1]) + + def test_rooms_bump_stamp_change_incremental(self) -> None: + """Test that the bump stamp is included if there has been a change, even + if its not in the timeline""" + + user1_id = self.register_user("user1", "pass") + user1_tok = 
self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 2, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Initial sync so we expect to see a bump stamp + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + first_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"] + + # Send a bump event at the start. + self.helper.send(room_id1, "test", tok=user1_tok) + + # Send events that are not in the bump events list to fill the timeline + for _ in range(5): + self.helper.send_event( + room_id1, type="org.matrix.test", content={}, tok=user1_tok + ) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # There was a bump event in the timeline gap, so we should see the bump + # stamp be updated. + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + second_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"] + + self.assertGreater(second_bump_stamp, first_bump_stamp) + def test_rooms_bump_stamp_invites(self) -> None: """ Test that `bump_stamp` is present and points to the membership event, From 006251a5d0fb7f4dbaba00ca1ee92bdc968120ff Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Tue, 8 Oct 2024 07:01:44 -0400 Subject: [PATCH 305/332] Add missing license header (#17799) Co-authored-by: Erik Johnston --- changelog.d/17799.misc | 1 + synapse/app/generic_worker.py | 2 +- synapse/handlers/delayed_events.py | 14 ++++++++++++++ synapse/replication/http/__init__.py | 2 +- synapse/replication/http/delayed_events.py | 14 ++++++++++++++ synapse/rest/client/delayed_events.py | 14 ++++++++++++++ synapse/storage/databases/main/delayed_events.py | 14 ++++++++++++++ .../schema/main/delta/88/01_add_delayed_events.sql | 13 +++++++++++++ tests/rest/client/test_delayed_events.py | 14 ++++++++++++++ tests/rest/client/test_rooms.py | 2 +- 10 files changed, 87 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17799.misc diff --git a/changelog.d/17799.misc b/changelog.d/17799.misc new file mode 100644 index 0000000000..99022f4f53 --- /dev/null +++ b/changelog.d/17799.misc @@ -0,0 +1 @@ +Add missing license headers on new source files. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 6a944998f1..a528c3890d 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -3,7 +3,7 @@ # # Copyright 2020 The Matrix.org Foundation C.I.C. # Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index 9d59a09948..3c88a96fd3 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -1,3 +1,17 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. 
+# +# See the GNU Affero General Public License for more details: +# . +# + import logging from typing import TYPE_CHECKING, List, Optional, Set, Tuple diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index 1673bd057e..d500051714 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -1,7 +1,7 @@ # # This file is licensed under the Affero General Public License (AGPL) version 3. # -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py index 77dabb08e6..229022070c 100644 --- a/synapse/replication/http/delayed_events.py +++ b/synapse/replication/http/delayed_events.py @@ -1,3 +1,17 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + import logging from typing import TYPE_CHECKING, Dict, Optional, Tuple diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py index eae5c9d226..2dd5a60b2b 100644 --- a/synapse/rest/client/delayed_events.py +++ b/synapse/rest/client/delayed_events.py @@ -1,3 +1,17 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + # This module contains REST servlets to do with delayed events: /delayed_events/ import logging diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 1a7781713f..1616e30e22 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -1,3 +1,17 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + import logging from typing import List, NewType, Optional, Tuple diff --git a/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql index 55bfbc8ae7..78ba5129af 100644 --- a/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql +++ b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql @@ -1,3 +1,16 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. 
+-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + CREATE TABLE delayed_events ( delay_id TEXT NOT NULL, user_localpart TEXT NOT NULL, diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py index cb77c73da2..1793b38c4a 100644 --- a/tests/rest/client/test_delayed_events.py +++ b/tests/rest/client/test_delayed_events.py @@ -1,3 +1,17 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + """Tests REST events for /delayed_events paths.""" from http import HTTPStatus diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 00be0051c6..2ecd37ca1a 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -4,7 +4,7 @@ # Copyright 2019 The Matrix.org Foundation C.I.C. # Copyright 2017 Vector Creations Ltd # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as From a5986ac229b97c06be0f50fcb30d27285035ebc3 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 8 Oct 2024 06:23:21 -0700 Subject: [PATCH 306/332] Improvements to admin redact api (#17792) - better validation on user input - fix an early task completion - when checking membership in rooms, check for rooms user has been banned from as well --- changelog.d/17792.bugfix | 1 + synapse/handlers/admin.py | 4 +- synapse/rest/admin/users.py | 45 ++++++------ synapse/storage/databases/main/roommember.py | 21 ++++++ tests/rest/admin/test_user.py | 77 ++++++++++++++++---- 5 files changed, 107 insertions(+), 41 deletions(-) create mode 100644 changelog.d/17792.bugfix diff --git a/changelog.d/17792.bugfix b/changelog.d/17792.bugfix new file mode 100644 index 0000000000..451b32782e --- /dev/null +++ b/changelog.d/17792.bugfix @@ -0,0 +1 @@ +Improve input validation and room membership checks in admin redaction API. 
\ No newline at end of file diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 58d89080ff..851fe57a17 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -443,8 +443,8 @@ class AdminHandler: ["m.room.member", "m.room.message"], ) if not event_ids: - # there's nothing to redact - return TaskStatus.COMPLETE, result, None + # nothing to redact in this room + continue events = await self._store.get_events_as_list(event_ids) for event in events: diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 81dfb57a95..b146c2754d 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -27,7 +27,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import attr -from synapse._pydantic_compat import StrictBool +from synapse._pydantic_compat import StrictBool, StrictInt, StrictStr from synapse.api.constants import Direction, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -1421,40 +1421,39 @@ class RedactUser(RestServlet): self._store = hs.get_datastores().main self.admin_handler = hs.get_admin_handler() + class PostBody(RequestBodyModel): + rooms: List[StrictStr] + reason: Optional[StrictStr] + limit: Optional[StrictInt] + async def on_POST( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) - body = parse_json_object_from_request(request, allow_empty_body=True) - rooms = body.get("rooms") - if rooms is None: + # parse provided user id to check that it is valid + UserID.from_string(user_id) + + body = parse_and_validate_json_object_from_request(request, self.PostBody) + + limit = body.limit + if limit and limit <= 0: raise SynapseError( - HTTPStatus.BAD_REQUEST, "Must provide a value for rooms." + HTTPStatus.BAD_REQUEST, + "If limit is provided it must be a non-negative integer greater than 0.", ) - reason = body.get("reason") - if reason: - if not isinstance(reason, str): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "If a reason is provided it must be a string.", - ) - - limit = body.get("limit") - if limit: - if not isinstance(limit, int) or limit <= 0: - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "If limit is provided it must be a non-negative integer greater than 0.", - ) - + rooms = body.rooms if not rooms: - rooms = await self._store.get_rooms_for_user(user_id) + current_rooms = list(await self._store.get_rooms_for_user(user_id)) + banned_rooms = list( + await self._store.get_rooms_user_currently_banned_from(user_id) + ) + rooms = current_rooms + banned_rooms redact_id = await self.admin_handler.start_redact_events( - user_id, list(rooms), requester.serialize(), reason, limit + user_id, rooms, requester.serialize(), body.reason, limit ) return HTTPStatus.OK, {"redact_id": redact_id} diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 6f15e51339..c77e009d03 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -711,6 +711,27 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return {row[0] for row in txn} + async def get_rooms_user_currently_banned_from( + self, user_id: str + ) -> FrozenSet[str]: + """Returns a set of room_ids the user is currently banned from. + + If a remote user only returns rooms this server is currently + participating in. 
+ """ + room_ids = await self.db_pool.simple_select_onecol( + table="current_state_events", + keyvalues={ + "type": EventTypes.Member, + "membership": Membership.BAN, + "state_key": user_id, + }, + retcol="room_id", + desc="get_rooms_user_currently_banned_from", + ) + + return frozenset(room_ids) + @cached(max_entries=500000, iterable=True) async def get_rooms_for_user(self, user_id: str) -> FrozenSet[str]: """Returns a set of room_ids the user is currently joined to. diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index ef918efe49..6982c7291a 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -5288,19 +5288,26 @@ class UserRedactionTestCase(unittest.HomeserverTestCase): self.assertEqual(len(matched), len(rm2_originals)) def test_admin_redact_works_if_user_kicked_or_banned(self) -> None: - originals = [] + originals1 = [] + originals2 = [] for rm in [self.rm1, self.rm2, self.rm3]: join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) - originals.append(join["event_id"]) + if rm in [self.rm1, self.rm3]: + originals1.append(join["event_id"]) + else: + originals2.append(join["event_id"]) for i in range(5): event = {"body": f"hello{i}", "msgtype": "m.text"} res = self.helper.send_event( rm, "m.room.message", event, tok=self.bad_user_tok ) - originals.append(res["event_id"]) + if rm in [self.rm1, self.rm3]: + originals1.append(res["event_id"]) + else: + originals2.append(res["event_id"]) # kick user from rooms 1 and 3 - for r in [self.rm1, self.rm2]: + for r in [self.rm1, self.rm3]: channel = self.make_request( "POST", f"/_matrix/client/r0/rooms/{r}/kick", @@ -5330,32 +5337,70 @@ class UserRedactionTestCase(unittest.HomeserverTestCase): failed_redactions = channel2.json_body.get("failed_redactions") self.assertEqual(failed_redactions, {}) - # ban user - channel3 = self.make_request( + # double check + for rm in [self.rm1, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel3 = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel3.code, 200) + + matches = [] + for event in channel3.json_body["chunk"]: + for event_id in originals1: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + matches.append((event_id, event)) + # we redacted 6 messages + self.assertEqual(len(matches), 6) + + # ban user from room 2 + channel4 = self.make_request( "POST", f"/_matrix/client/r0/rooms/{self.rm2}/ban", content={"reason": "being a bummer", "user_id": self.bad_user}, access_token=self.admin_tok, ) - self.assertEqual(channel3.code, HTTPStatus.OK, channel3.result) + self.assertEqual(channel4.code, HTTPStatus.OK, channel4.result) - # redact messages in room 2 - channel4 = self.make_request( + # make a request to ban all user's messages + channel5 = self.make_request( "POST", f"/_synapse/admin/v1/user/{self.bad_user}/redact", - content={"rooms": [self.rm2]}, + content={"rooms": []}, access_token=self.admin_tok, ) - self.assertEqual(channel4.code, 200) - id2 = channel1.json_body.get("redact_id") + self.assertEqual(channel5.code, 200) + id2 = channel5.json_body.get("redact_id") # check that there were no failed redactions in room 2 - channel5 = self.make_request( + channel6 = self.make_request( "GET", f"/_synapse/admin/v1/user/redact_status/{id2}", access_token=self.admin_tok, ) - self.assertEqual(channel5.code, 200) - self.assertEqual(channel5.json_body.get("status"), "complete") - failed_redactions = 
channel5.json_body.get("failed_redactions") + self.assertEqual(channel6.code, 200) + self.assertEqual(channel6.json_body.get("status"), "complete") + failed_redactions = channel6.json_body.get("failed_redactions") self.assertEqual(failed_redactions, {}) + + # double check messages in room 2 were redacted + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel7 = self.make_request( + "GET", + f"rooms/{self.rm2}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel7.code, 200) + + matches = [] + for event in channel7.json_body["chunk"]: + for event_id in originals2: + if event["type"] == "m.room.redaction" and event["redacts"] == event_id: + matches.append((event_id, event)) + # we redacted 6 messages + self.assertEqual(len(matches), 6) From b3e2d10f39a87d19420fdf9114db2477039f6197 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2024 14:37:36 +0100 Subject: [PATCH 307/332] 1.117.0rc1 --- CHANGES.md | 40 +++++++++++++++++++++++++++++++++++++++ changelog.d/17628.doc | 1 - changelog.d/17709.doc | 1 - changelog.d/17717.feature | 1 - changelog.d/17749.doc | 1 - changelog.d/17751.misc | 1 - changelog.d/17765.misc | 1 - changelog.d/17766.misc | 1 - changelog.d/17767.misc | 1 - changelog.d/17768.misc | 1 - changelog.d/17771.misc | 1 - changelog.d/17776.doc | 1 - changelog.d/17779.bugfix | 1 - changelog.d/17780.bugfix | 1 - changelog.d/17787.misc | 1 - changelog.d/17788.misc | 1 - changelog.d/17789.misc | 1 - changelog.d/17792.bugfix | 1 - changelog.d/17799.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 21 files changed, 47 insertions(+), 19 deletions(-) delete mode 100644 changelog.d/17628.doc delete mode 100644 changelog.d/17709.doc delete mode 100644 changelog.d/17717.feature delete mode 100644 changelog.d/17749.doc delete mode 100644 changelog.d/17751.misc delete mode 100644 changelog.d/17765.misc delete mode 100644 changelog.d/17766.misc delete mode 100644 changelog.d/17767.misc delete mode 100644 changelog.d/17768.misc delete mode 100644 changelog.d/17771.misc delete mode 100644 changelog.d/17776.doc delete mode 100644 changelog.d/17779.bugfix delete mode 100644 changelog.d/17780.bugfix delete mode 100644 changelog.d/17787.misc delete mode 100644 changelog.d/17788.misc delete mode 100644 changelog.d/17789.misc delete mode 100644 changelog.d/17792.bugfix delete mode 100644 changelog.d/17799.misc diff --git a/CHANGES.md b/CHANGES.md index ae69046a1a..da8658c9a7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,43 @@ +# Synapse 1.117.0rc1 (2024-10-08) + +### Features + +- Add config option `redis.password_path`. ([\#17717](https://github.com/element-hq/synapse/issues/17717)) + +### Bugfixes + +- Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. ([\#17779](https://github.com/element-hq/synapse/issues/17779)) +- In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for MSC4140 to `false` when server configuration disables support for delayed events. ([\#17780](https://github.com/element-hq/synapse/issues/17780)) +- Improve input validation and room membership checks in admin redaction API. ([\#17792](https://github.com/element-hq/synapse/issues/17792)) + +### Improved Documentation + +- Clarify the docstring of `test_forget_when_not_left`. ([\#17628](https://github.com/element-hq/synapse/issues/17628)) +- Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg. 
([\#17709](https://github.com/element-hq/synapse/issues/17709)) +- Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs. ([\#17749](https://github.com/element-hq/synapse/issues/17749)) +- Explain how load balancing works for `federation_sender_instances`. ([\#17776](https://github.com/element-hq/synapse/issues/17776)) + +### Internal Changes + +- Minor performance increase for large accounts using sliding sync. ([\#17751](https://github.com/element-hq/synapse/issues/17751)) +- Increase performance of the notifier when there are many syncing users. ([\#17765](https://github.com/element-hq/synapse/issues/17765), [\#17766](https://github.com/element-hq/synapse/issues/17766)) +- Fix performance of streams that don't change often. ([\#17767](https://github.com/element-hq/synapse/issues/17767)) +- Improve performance of sliding sync connections that do not ask for any rooms. ([\#17768](https://github.com/element-hq/synapse/issues/17768)) +- Reduce overhead of sliding sync E2EE loops. ([\#17771](https://github.com/element-hq/synapse/issues/17771)) +- Sliding sync minor performance speed up using new table. ([\#17787](https://github.com/element-hq/synapse/issues/17787)) +- Sliding sync minor performance improvement by omitting unchanged data from incremental responses. ([\#17788](https://github.com/element-hq/synapse/issues/17788)) +- Speed up sliding sync when there are many active subscriptions. ([\#17789](https://github.com/element-hq/synapse/issues/17789)) +- Add missing license headers on new source files. ([\#17799](https://github.com/element-hq/synapse/issues/17799)) + + + +### Updates to locked dependencies + +* Bump phonenumbers from 8.13.45 to 8.13.46. ([\#17773](https://github.com/element-hq/synapse/issues/17773)) +* Bump python-multipart from 0.0.10 to 0.0.12. ([\#17772](https://github.com/element-hq/synapse/issues/17772)) +* Bump regex from 1.10.6 to 1.11.0. ([\#17770](https://github.com/element-hq/synapse/issues/17770)) +* Bump ruff from 0.6.7 to 0.6.8. ([\#17774](https://github.com/element-hq/synapse/issues/17774)) + # Synapse 1.116.0 (2024-10-01) No significant changes since 1.116.0rc2. diff --git a/changelog.d/17628.doc b/changelog.d/17628.doc deleted file mode 100644 index 8b9f436e5f..0000000000 --- a/changelog.d/17628.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify the docstring of `test_forget_when_not_left`. \ No newline at end of file diff --git a/changelog.d/17709.doc b/changelog.d/17709.doc deleted file mode 100644 index 8fffc1ca0c..0000000000 --- a/changelog.d/17709.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg. diff --git a/changelog.d/17717.feature b/changelog.d/17717.feature deleted file mode 100644 index 292c99ccc5..0000000000 --- a/changelog.d/17717.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option `redis.password_path`. \ No newline at end of file diff --git a/changelog.d/17749.doc b/changelog.d/17749.doc deleted file mode 100644 index f00c0be3b7..0000000000 --- a/changelog.d/17749.doc +++ /dev/null @@ -1 +0,0 @@ -Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs. diff --git a/changelog.d/17751.misc b/changelog.d/17751.misc deleted file mode 100644 index 4d35327481..0000000000 --- a/changelog.d/17751.misc +++ /dev/null @@ -1 +0,0 @@ -Minor performance increase for large accounts using sliding sync. 
diff --git a/changelog.d/17765.misc b/changelog.d/17765.misc deleted file mode 100644 index af4e5c85ea..0000000000 --- a/changelog.d/17765.misc +++ /dev/null @@ -1 +0,0 @@ -Increase performance of the notifier when there are many syncing users. diff --git a/changelog.d/17766.misc b/changelog.d/17766.misc deleted file mode 100644 index af4e5c85ea..0000000000 --- a/changelog.d/17766.misc +++ /dev/null @@ -1 +0,0 @@ -Increase performance of the notifier when there are many syncing users. diff --git a/changelog.d/17767.misc b/changelog.d/17767.misc deleted file mode 100644 index 36f23d0f60..0000000000 --- a/changelog.d/17767.misc +++ /dev/null @@ -1 +0,0 @@ -Fix performance of streams that don't change often. diff --git a/changelog.d/17768.misc b/changelog.d/17768.misc deleted file mode 100644 index 3b80e72534..0000000000 --- a/changelog.d/17768.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sliding sync connections that do not ask for any rooms. diff --git a/changelog.d/17771.misc b/changelog.d/17771.misc deleted file mode 100644 index be28223151..0000000000 --- a/changelog.d/17771.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce overhead of sliding sync E2EE loops. diff --git a/changelog.d/17776.doc b/changelog.d/17776.doc deleted file mode 100644 index 86754a3464..0000000000 --- a/changelog.d/17776.doc +++ /dev/null @@ -1 +0,0 @@ -Explain how load balancing works for `federation_sender_instances`. diff --git a/changelog.d/17779.bugfix b/changelog.d/17779.bugfix deleted file mode 100644 index 72785830d9..0000000000 --- a/changelog.d/17779.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. \ No newline at end of file diff --git a/changelog.d/17780.bugfix b/changelog.d/17780.bugfix deleted file mode 100644 index 9d918ae745..0000000000 --- a/changelog.d/17780.bugfix +++ /dev/null @@ -1 +0,0 @@ -In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for MSC4140 to `false` when server configuration disables support for delayed events. diff --git a/changelog.d/17787.misc b/changelog.d/17787.misc deleted file mode 100644 index 41ac59b348..0000000000 --- a/changelog.d/17787.misc +++ /dev/null @@ -1 +0,0 @@ -Sliding sync minor performance speed up using new table. diff --git a/changelog.d/17788.misc b/changelog.d/17788.misc deleted file mode 100644 index 1ef6f6e2ba..0000000000 --- a/changelog.d/17788.misc +++ /dev/null @@ -1 +0,0 @@ -Sliding sync minor performance improvement by omitting unchanged data from incremental responses. diff --git a/changelog.d/17789.misc b/changelog.d/17789.misc deleted file mode 100644 index 43ed360ce8..0000000000 --- a/changelog.d/17789.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sliding sync when there are many active subscriptions. diff --git a/changelog.d/17792.bugfix b/changelog.d/17792.bugfix deleted file mode 100644 index 451b32782e..0000000000 --- a/changelog.d/17792.bugfix +++ /dev/null @@ -1 +0,0 @@ -Improve input validation and room membership checks in admin redaction API. \ No newline at end of file diff --git a/changelog.d/17799.misc b/changelog.d/17799.misc deleted file mode 100644 index 99022f4f53..0000000000 --- a/changelog.d/17799.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing license headers on new source files. 
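The fragment deletions above are the usual release mechanics: Synapse's changelog is assembled by towncrier, which folds each `changelog.d/<PR>.<type>` file into the matching section of `CHANGES.md` and then removes the fragment. A rough, simplified sketch of that folding step (the real tool also adds issue links and deduplicates repeated entries):

```python
from pathlib import Path

SECTIONS = {
    "feature": "Features",
    "bugfix": "Bugfixes",
    "doc": "Improved Documentation",
    "misc": "Internal Changes",
}

def collect_fragments(changelog_dir: Path) -> str:
    """Group changelog.d fragments into release-notes sections."""
    grouped = {title: [] for title in SECTIONS.values()}
    for fragment in sorted(changelog_dir.iterdir()):
        if not fragment.is_file():
            continue
        # Fragment names look like "17788.misc": PR number, then change type.
        pr_number, _, kind = fragment.name.partition(".")
        if kind not in SECTIONS:
            continue
        grouped[SECTIONS[kind]].append(f"- {fragment.read_text().strip()} (#{pr_number})")
    return "\n\n".join(
        f"### {title}\n\n" + "\n".join(bullets)
        for title, bullets in grouped.items()
        if bullets
    )
```
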
diff --git a/debian/changelog b/debian/changelog index c37536d7e8..aa3c638359 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium + + * New Synapse release 1.117.0rc1. + + -- Synapse Packaging team Tue, 08 Oct 2024 14:37:11 +0100 + matrix-synapse-py3 (1.116.0) stable; urgency=medium * New Synapse release 1.116.0. diff --git a/pyproject.toml b/pyproject.toml index 0246407f18..bfcf368641 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.116.0" +version = "1.117.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 43040a4051ac6cfc13ad905c550a03d582b80633 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:39:19 +0100 Subject: [PATCH 308/332] Bump ruff from 0.6.8 to 0.6.9 (#17794) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index bf30fbbe15..79bf395401 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.8" +version = "0.6.9" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, - {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, - {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, - {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, - {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, - {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = "sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, - {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, + {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, + {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, + {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, + {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, + {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, + {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, + {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, ] [[package]] @@ -3114,4 +3114,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "304d03b74d2886def69ae44ce5afaed21318db9f09aae91281e0f182e1660ffd" +content-hash = 
"c8a22f901970b2f851151e731532757fd3acf7ba02930952636d2e6c5c9c0c90" diff --git a/pyproject.toml b/pyproject.toml index 0246407f18..0e4f4c8406 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.6.8" +ruff = "0.6.9" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 475e192cbef9a3ea7bc19eaa10de84322e7813a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:40:12 +0100 Subject: [PATCH 309/332] Bump tomli from 2.0.1 to 2.0.2 (#17796) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 79bf395401..929248ff8d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2535,13 +2535,13 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] From 165f4ca776239e1ed88a28b547345cd129a1d8ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:41:03 +0100 Subject: [PATCH 310/332] Bump sentry-sdk from 2.14.0 to 2.15.0 (#17795) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 929248ff8d..4f05169465 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2334,13 +2334,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.14.0" +version = "2.15.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:b8bc3dc51d06590df1291b7519b85c75e2ced4f28d9ea655b6d54033503b5bf4"}, - {file = "sentry_sdk-2.14.0.tar.gz", hash = "sha256:1e0e2eaf6dad918c7d1e0edac868a7bf20017b177f242cefe2a6bcd47955961d"}, + {file = "sentry_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:8fb0d1a4e1a640172f31502e4503543765a1fe8a9209779134a4ac52d4677303"}, + {file = "sentry_sdk-2.15.0.tar.gz", hash = "sha256:a599e7d3400787d6f43327b973e55a087b931ba2c592a7a7afa691f8eb5e75e2"}, ] [package.dependencies] From 1bb528ee440c5f5c46c96a493c4a8a8a790a00f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:41:27 +0100 Subject: [PATCH 311/332] Bump phonenumbers from 8.13.46 to 8.13.47 (#17797) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/poetry.lock b/poetry.lock index 4f05169465..caee5974c5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1447,13 +1447,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.46" +version = "8.13.47" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.46-py2.py3-none-any.whl", hash = "sha256:519422d407af066fdbf98e179ea2e214487060f26526d67871f817eefbbb2134"}, - {file = "phonenumbers-8.13.46.tar.gz", hash = "sha256:94bf18ba9725bb6868d29473b13f78ef01e2585c5cb561ec0200be7676e77452"}, + {file = "phonenumbers-8.13.47-py2.py3-none-any.whl", hash = "sha256:5d3c0142ef7055ca5551884352e3b6b93bfe002a0bc95b8eaba39b0e2184541b"}, + {file = "phonenumbers-8.13.47.tar.gz", hash = "sha256:53c5e7c6d431cafe4efdd44956078404ae9bc8b0eacc47be3105d3ccc88aaffa"}, ] [[package]] From f40641c29b36ac42f22d1976ec02c38d8602afc6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:56:15 +0100 Subject: [PATCH 312/332] Bump sigstore/cosign-installer from 3.6.0 to 3.7.0 (#17798) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1a97809a26..ebf866e3d5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,7 +30,7 @@ jobs: run: docker buildx inspect - name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 - name: Checkout repository uses: actions/checkout@v4 From 6a0c21fabdc9232d2fb13eaecda2afe85beecebd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Oct 2024 15:04:20 +0100 Subject: [PATCH 313/332] Fixup changlog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index da8658c9a7..ad0d1043bd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,7 +7,7 @@ ### Bugfixes - Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. ([\#17779](https://github.com/element-hq/synapse/issues/17779)) -- In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for MSC4140 to `false` when server configuration disables support for delayed events. ([\#17780](https://github.com/element-hq/synapse/issues/17780)) +- In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140) to `false` when server configuration disables support for delayed events. ([\#17780](https://github.com/element-hq/synapse/issues/17780)) - Improve input validation and room membership checks in admin redaction API. 
([\#17792](https://github.com/element-hq/synapse/issues/17792)) ### Improved Documentation From bdcc9fa388262ece29f06973d601a99d3fee7be7 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Tue, 8 Oct 2024 10:05:36 -0400 Subject: [PATCH 314/332] Fix incorrectly documented config path argument (#17802) --- changelog.d/17802.doc | 1 + docs/workers.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17802.doc diff --git a/changelog.d/17802.doc b/changelog.d/17802.doc new file mode 100644 index 0000000000..72e653d3c4 --- /dev/null +++ b/changelog.d/17802.doc @@ -0,0 +1 @@ +Correct documentation to refer to the `--config-path` argument instead of `--config-file`. diff --git a/docs/workers.md b/docs/workers.md index 51b22fef9b..0116c455bc 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou You can start the main Synapse process with Poetry by running the following command: ```console -poetry run synapse_homeserver --config-file [your homeserver.yaml] +poetry run synapse_homeserver --config-path [your homeserver.yaml] ``` For worker setups, you can run the following command ```console -poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml] +poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml] ``` ## Available worker applications From 60aebdb27edfe00f43b7e95bc60a730408bc3084 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Tue, 8 Oct 2024 19:32:25 +0200 Subject: [PATCH 315/332] Fix saving of non-RGB thumbnails as PNG (#17736) --- changelog.d/17736.bugfix | 1 + synapse/media/thumbnailer.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17736.bugfix diff --git a/changelog.d/17736.bugfix b/changelog.d/17736.bugfix new file mode 100644 index 0000000000..0d3fd06962 --- /dev/null +++ b/changelog.d/17736.bugfix @@ -0,0 +1 @@ +Fix saving of PNG thumbnails, when the original image is in the CMYK color space. diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index ee1118a53a..3845067835 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -206,7 +206,7 @@ class Thumbnailer: def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO: output_bytes_io = BytesIO() fmt = self.FORMATS[output_type] - if fmt == "JPEG": + if fmt == "JPEG" or fmt == "PNG" and output_image.mode == "CMYK": output_image = output_image.convert("RGB") output_image.save(output_bytes_io, fmt, quality=80) return output_bytes_io From 05576f0b4b4a1ba5e756cbd1f0a064751f233d83 Mon Sep 17 00:00:00 2001 From: Nathan Date: Wed, 9 Oct 2024 14:21:08 +0200 Subject: [PATCH 316/332] Added display_name_claim in jwt_config which sets the user's display name upon registration (#17708) --- changelog.d/17708.feature | 1 + .../configuration/config_documentation.md | 3 +++ synapse/config/jwt.py | 2 ++ synapse/handlers/jwt.py | 16 +++++++++--- synapse/rest/client/login.py | 9 +++++-- tests/rest/client/test_login.py | 25 +++++++++++++++++++ 6 files changed, 50 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17708.feature diff --git a/changelog.d/17708.feature b/changelog.d/17708.feature new file mode 100644 index 0000000000..90ec810f50 --- /dev/null +++ b/changelog.d/17708.feature @@ -0,0 +1 @@ +Added the `display_name_claim` option to the JWT configuration. 
This option allows specifying the claim key that contains the user's display name in the JWT payload. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 29f3528c7e..1de2f68865 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3722,6 +3722,8 @@ Additional sub-options for this setting include: Required if `enabled` is set to true. * `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. +* `display_name_claim`: Name of the claim containing the display name for the user. Optional. + If provided, the display name will be set to the value of this claim upon first login. * `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the "iss" claim will be required and validated for all JSON web tokens. * `audiences`: A list of audiences to validate the "aud" claim against. Optional. @@ -3736,6 +3738,7 @@ jwt_config: secret: "provided-by-your-issuer" algorithm: "provided-by-your-issuer" subject_claim: "name_of_claim" + display_name_claim: "name_of_claim" issuer: "provided-by-your-issuer" audiences: - "provided-by-your-issuer" diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index b41f2dc08f..5c76551f33 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -38,6 +38,7 @@ class JWTConfig(Config): self.jwt_algorithm = jwt_config["algorithm"] self.jwt_subject_claim = jwt_config.get("subject_claim", "sub") + self.jwt_display_name_claim = jwt_config.get("display_name_claim") # The issuer and audiences are optional, if provided, it is asserted # that the claims exist on the JWT. @@ -49,5 +50,6 @@ class JWTConfig(Config): self.jwt_secret = None self.jwt_algorithm = None self.jwt_subject_claim = None + self.jwt_display_name_claim = None self.jwt_issuer = None self.jwt_audiences = None diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index 5fa7a305ad..400f3a59aa 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError @@ -36,11 +36,12 @@ class JwtHandler: self.jwt_secret = hs.config.jwt.jwt_secret self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim + self.jwt_display_name_claim = hs.config.jwt.jwt_display_name_claim self.jwt_algorithm = hs.config.jwt.jwt_algorithm self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - def validate_login(self, login_submission: JsonDict) -> str: + def validate_login(self, login_submission: JsonDict) -> Tuple[str, Optional[str]]: """ Authenticates the user for the /login API @@ -49,7 +50,8 @@ class JwtHandler: (including 'type' and other relevant fields) Returns: - The user ID that is logging in. + A tuple of (user_id, display_name) of the user that is logging in. + If the JWT does not contain a display name, the second element of the tuple will be None. Raises: LoginError if there was an authentication problem. 
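The hunk below wires the new claim in: the handler now returns the display name alongside the user ID, taken from whichever claim `display_name_claim` names, or `None` when the claim is absent. A hedged, dependency-free sketch of that extraction — a plain dict stands in for the authlib `JWTClaims` object used by the real handler:

```python
from typing import Optional, Tuple

def extract_user(
    claims: dict,
    subject_claim: str = "sub",
    display_name_claim: Optional[str] = None,
) -> Tuple[str, Optional[str]]:
    """Pull (user localpart, optional display name) out of decoded JWT claims."""
    user = claims[subject_claim]
    display_name = None
    if display_name_claim is not None:
        # An absent claim yields None, so registration simply proceeds
        # without setting a display name.
        display_name = claims.get(display_name_claim)
    return user, display_name

print(extract_user({"sub": "pinkie", "display_name": "Pinkie Pie"},
                   display_name_claim="display_name"))
# ('pinkie', 'Pinkie Pie')
```
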
@@ -109,4 +111,10 @@ class JwtHandler: if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) - return UserID(user, self.hs.hostname).to_string() + default_display_name = None + if self.jwt_display_name_claim: + display_name_claim = claims.get(self.jwt_display_name_claim) + if display_name_claim is not None: + default_display_name = display_name_claim + + return UserID(user, self.hs.hostname).to_string(), default_display_name diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 03b1e7edc4..3271b02d40 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -363,6 +363,7 @@ class LoginRestServlet(RestServlet): login_submission: JsonDict, callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None, create_non_existent_users: bool = False, + default_display_name: Optional[str] = None, ratelimit: bool = True, auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, @@ -410,7 +411,8 @@ class LoginRestServlet(RestServlet): canonical_uid = await self.auth_handler.check_user_exists(user_id) if not canonical_uid: canonical_uid = await self.registration_handler.register_user( - localpart=UserID.from_string(user_id).localpart + localpart=UserID.from_string(user_id).localpart, + default_display_name=default_display_name, ) user_id = canonical_uid @@ -546,11 +548,14 @@ class LoginRestServlet(RestServlet): Returns: The body of the JSON response. """ - user_id = self.hs.get_jwt_handler().validate_login(login_submission) + user_id, default_display_name = self.hs.get_jwt_handler().validate_login( + login_submission + ) return await self._complete_login( user_id, login_submission, create_non_existent_users=True, + default_display_name=default_display_name, should_issue_refresh_token=should_issue_refresh_token, request_info=request_info, ) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 2b1e44381b..cbd6d8d4bf 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -1047,6 +1047,7 @@ class JWTTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, + profile.register_servlets, ] jwt_secret = "secret" @@ -1202,6 +1203,30 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") + @override_config( + {"jwt_config": {**base_config, "display_name_claim": "display_name"}} + ) + def test_login_custom_display_name(self) -> None: + """Test setting a custom display name.""" + localpart = "pinkie" + user_id = f"@{localpart}:test" + display_name = "Pinkie Pie" + + # Perform the login, specifying a custom display name. + channel = self.jwt_login({"sub": localpart, "display_name": display_name}) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["user_id"], user_id) + + # Fetch the user's display name and check that it was set correctly. 
+ access_token = channel.json_body["access_token"] + channel = self.make_request( + "GET", + f"/_matrix/client/v3/profile/{user_id}/displayname", + access_token=access_token, + ) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["displayname"], display_name) + def test_login_no_token(self) -> None: params = {"type": "org.matrix.login.jwt"} channel = self.make_request(b"POST", LOGIN_URL, params) From f6a3e5e1c235884b0e550bceba55cd9311def2d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 10 Oct 2024 09:59:01 +0100 Subject: [PATCH 317/332] Fix release script to check GH token (#17803) The current logic didn't work. --- changelog.d/17803.misc | 1 + scripts-dev/release.py | 36 +++++++++++++++++++++++------------- 2 files changed, 24 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17803.misc diff --git a/changelog.d/17803.misc b/changelog.d/17803.misc new file mode 100644 index 0000000000..a267df8b83 --- /dev/null +++ b/changelog.d/17803.misc @@ -0,0 +1 @@ +Test github token before running release script steps. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 4435624267..b14b61c705 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -40,7 +40,7 @@ import commonmark import git from click.exceptions import ClickException from git import GitCommandError, Repo -from github import Github +from github import BadCredentialsException, Github from packaging import version @@ -323,10 +323,8 @@ def tag(gh_token: Optional[str]) -> None: def _tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) # Make sure we're in a git repo. repo = get_repo_and_check_clean_checkout() @@ -469,10 +467,8 @@ def upload(gh_token: Optional[str]) -> None: def _upload(gh_token: Optional[str]) -> None: """Upload release to pypi.""" - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) current_version = get_package_version() tag_name = f"v{current_version}" @@ -569,10 +565,8 @@ def wait_for_actions(gh_token: Optional[str]) -> None: def _wait_for_actions(gh_token: Optional[str]) -> None: - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) # Find out the version and tag name. current_version = get_package_version() @@ -806,6 +800,22 @@ def get_repo_and_check_clean_checkout( return repo +def check_valid_gh_token(gh_token: Optional[str]) -> None: + """Check that a github token is valid, if supplied""" + + if not gh_token: + # No github token supplied, so nothing to do. + return + + try: + gh = Github(gh_token) + + # We need to lookup name to trigger a request. 
+        _name = gh.get_user().name
+    except BadCredentialsException as e:
+        raise click.ClickException(f"GitHub credentials are bad: {e}")
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
     if ref_name in repo.references:

From 451a9dc7b9284a5d0988318970e15ad41b34491f Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 14 Oct 2024 11:31:49 +0100
Subject: [PATCH 318/332] Clarify when 3PID invite module callbacks are called
 (#17627)

Co-authored-by: Eric Eastwood
---
 changelog.d/17627.doc                  | 1 +
 docs/modules/spam_checker_callbacks.md | 9 ++++++---
 2 files changed, 7 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/17627.doc

diff --git a/changelog.d/17627.doc b/changelog.d/17627.doc
new file mode 100644
index 0000000000..487a0aea0d
--- /dev/null
+++ b/changelog.d/17627.doc
@@ -0,0 +1 @@
+Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called.
\ No newline at end of file
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index ffdfe6082e..ec306d81ab 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -76,8 +76,9 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
 async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
 ```
 
-Called when processing an invitation. Both inviter and invitee are
-represented by their Matrix user ID (e.g. `@alice:example.com`).
+Called when processing an invitation, both when one is created locally and when
+receiving an invite over federation. Both inviter and invitee are represented by
+their Matrix user ID (e.g. `@alice:example.com`).
 
 The callback must return one of:
 
@@ -112,7 +113,9 @@ async def user_may_send_3pid_invite(
 ```
 
 Called when processing an invitation using a third-party identifier (also called a 3PID,
-e.g. an email address or a phone number).
+e.g. an email address or a phone number). It is only called when a 3PID invite is created
+locally - not when one is received in a room over federation. If the 3PID is already associated
+with a Matrix ID, the spam check will go through the `user_may_invite` callback instead.
 
 The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
 invitee is represented by its medium (e.g. "email") and its address

From 24975eca4dc32b2e8158256faccf30c8b1346a5e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 14 Oct 2024 11:34:33 +0100
Subject: [PATCH 319/332] Build Debian packages for new Ubuntu versions
 (#17824)

cf. https://wiki.ubuntu.com/Releases for the currently supported Ubuntu
releases.

Note: this removes support for 23.04 and 23.10, which are EOL.

Fixes #17811
---
 changelog.d/17824.misc               | 1 +
 scripts-dev/build_debian_packages.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/17824.misc

diff --git a/changelog.d/17824.misc b/changelog.d/17824.misc
new file mode 100644
index 0000000000..22574f00ec
--- /dev/null
+++ b/changelog.d/17824.misc
@@ -0,0 +1 @@
+Build Debian packages for new Ubuntu versions, and stop building for no longer supported versions.
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index de2a134544..88c8419400 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -32,8 +32,8 @@ DISTS = (
     "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
     "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
-    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
-    "ubuntu:mantic",  # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24)
+    "ubuntu:noble",  # 24.04 LTS (EOL 2029-06)
+    "ubuntu:oracular",  # 24.10 (EOL 2025-07)
     "debian:trixie",  # (EOL not specified yet)
 )

From 1266138b66b44f1d322f79c5e727238bbbf1e919 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:26:12 +0100
Subject: [PATCH 320/332] Bump sentry-sdk from 2.15.0 to 2.16.0 (#17829)

---
 poetry.lock | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index caee5974c5..3ad449b357 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2334,13 +2334,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]

 [[package]]
 name = "sentry-sdk"
-version = "2.15.0"
+version = "2.16.0"
 description = "Python client for Sentry (https://sentry.io)"
 optional = true
 python-versions = ">=3.6"
 files = [
-    {file = "sentry_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:8fb0d1a4e1a640172f31502e4503543765a1fe8a9209779134a4ac52d4677303"},
-    {file = "sentry_sdk-2.15.0.tar.gz", hash = "sha256:a599e7d3400787d6f43327b973e55a087b931ba2c592a7a7afa691f8eb5e75e2"},
+    {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"},
+    {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"},
 ]

 [package.dependencies]
@@ -2363,6 +2363,7 @@ falcon = ["falcon (>=1.4)"]
 fastapi = ["fastapi (>=0.79.0)"]
 flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
 grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
+http2 = ["httpcore[http2] (==1.*)"]
 httpx = ["httpx (>=0.16.0)"]
 huey = ["huey (>=2)"]
 huggingface-hub = ["huggingface-hub (>=0.22)"]

From 5dd6157972616b8764df96c32a54c6372acf86a5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:26:23 +0100
Subject: [PATCH 321/332] Bump types-setuptools from 75.1.0.20240917 to
 75.1.0.20241014 (#17828)

---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 3ad449b357..d6132716b1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2834,13 +2834,13 @@ urllib3 = ">=2"

 [[package]]
 name = "types-setuptools"
-version = "75.1.0.20240917"
+version = "75.1.0.20241014"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"},
-    {file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"},
+    {file = "types-setuptools-75.1.0.20241014.tar.gz", hash = "sha256:29b0560a8d4b4a91174be085847002c69abfcb048e20b33fc663005aedf56804"},
+    {file = "types_setuptools-75.1.0.20241014-py3-none-any.whl", hash = "sha256:caab58366741fb99673d0138b6e2d760717f154cfb981b74fea5e8de40f0b703"},
 ]

 [[package]]

From ae6179b3826ecd0770dc6b13388715e0344c8b70 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:26:40 +0100
Subject: [PATCH 322/332] Bump mypy-zope from 1.0.5 to 1.0.7 (#17827)

---
 poetry.lock | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index d6132716b1..9fd95ff447 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1377,16 +1377,17 @@ files = [

 [[package]]
 name = "mypy-zope"
-version = "1.0.5"
+version = "1.0.7"
 description = "Plugin for mypy to support zope interfaces"
 optional = false
 python-versions = "*"
 files = [
-    {file = "mypy_zope-1.0.5.tar.gz", hash = "sha256:2440406d49c0e1199c1cd819c92a2c4957de65579c6abc8a081c927f4bdc8d49"},
+    {file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
+    {file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
 ]

 [package.dependencies]
-mypy = ">=1.0.0,<1.11.0"
+mypy = ">=1.0.0,<1.12.0"
 "zope.interface" = "*"
 "zope.schema" = "*"

From d025b5ab508020740501ac8cca0da2fd99e89cee Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 14 Oct 2024 13:31:22 +0100
Subject: [PATCH 323/332] Correctly handle changes to required state config in
 sliding sync (#17785)

Fixes https://github.com/element-hq/synapse/issues/17698

This handles `required_state` changes by checking if new state has been
added to the config, and if so fetching and returning that from the
current state.

This also takes care to ensure that given a state entry S that is added,
removed and then re-added, we do *not* send S down a second time if there
have been no changes to S in the current state. This is fine for the Rust
SDK (as it just remembers all state), but we might decide not to do this
behaviour in the MSC. If we decide to always send down S then it's easy
enough to rip out all the code.

---------

Co-authored-by: Eric Eastwood
---
 changelog.d/17785.bugfix                      |   1 +
 changelog.d/17805.bugfix                      |   1 +
 synapse/handlers/sliding_sync/__init__.py     | 234 +++++-
 .../storage/databases/main/sliding_sync.py    |   4 +-
 synapse/types/state.py                        |   7 +
 tests/handlers/test_sliding_sync.py           | 694 +++++++++++++++++-
 .../sliding_sync/test_rooms_required_state.py | 261 +++++++
 7 files changed, 1188 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/17785.bugfix
 create mode 100644 changelog.d/17805.bugfix

diff --git a/changelog.d/17785.bugfix b/changelog.d/17785.bugfix
new file mode 100644
index 0000000000..df2898f54e
--- /dev/null
+++ b/changelog.d/17785.bugfix
@@ -0,0 +1 @@
+Fix bug with sliding sync where the server would not return state that was added to the `required_state` config.
diff --git a/changelog.d/17805.bugfix b/changelog.d/17805.bugfix
new file mode 100644
index 0000000000..df2898f54e
--- /dev/null
+++ b/changelog.d/17805.bugfix
@@ -0,0 +1 @@
+Fix bug with sliding sync where the server would not return state that was added to the `required_state` config.
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 8c12cea8eb..39dba4ff98 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -14,7 +14,7 @@ import logging from itertools import chain -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, Tuple +from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple from prometheus_client import Histogram from typing_extensions import assert_never @@ -522,6 +522,8 @@ class SlidingSyncHandler: state_reset_out_of_room = True + prev_room_sync_config = previous_connection_state.room_configs.get(room_id) + # Determine whether we should limit the timeline to the token range. # # We should return historical messages (before token range) in the @@ -550,7 +552,6 @@ class SlidingSyncHandler: # or `limited` mean for clients that interpret them correctly. In future this # behavior is almost certainly going to change. # - # TODO: Also handle changes to `required_state` from_bound = None initial = True ignore_timeline_bound = False @@ -571,7 +572,6 @@ class SlidingSyncHandler: log_kv({"sliding_sync.room_status": room_status}) - prev_room_sync_config = previous_connection_state.room_configs.get(room_id) if prev_room_sync_config is not None: # Check if the timeline limit has increased, if so ignore the # timeline bound and record the change (see "XXX: Odd behavior" @@ -582,8 +582,6 @@ class SlidingSyncHandler: ): ignore_timeline_bound = True - # TODO: Check for changes in `required_state`` - log_kv( { "sliding_sync.from_bound": from_bound, @@ -997,6 +995,10 @@ class SlidingSyncHandler: include_others=required_state_filter.include_others, ) + # The required state map to store in the room sync config, if it has + # changed. + changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None + # We can return all of the state that was requested if this was the first # time we've sent the room down this connection. room_state: StateMap[EventBase] = {} @@ -1010,6 +1012,29 @@ class SlidingSyncHandler: else: assert from_bound is not None + if prev_room_sync_config is not None: + # Check if there are any changes to the required state config + # that we need to handle. + changed_required_state_map, added_state_filter = ( + _required_state_changes( + user.to_string(), + previous_room_config=prev_room_sync_config, + room_sync_config=room_sync_config, + state_deltas=room_state_delta_id_map, + ) + ) + + if added_state_filter: + # Some state entries got added, so we pull out the current + # state for them. If we don't do this we'd only send down new deltas. + state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=added_state_filter, + to_token=to_token, + ) + room_state_delta_id_map.update(state_ids) + events = await self.store.get_events( state_filter.filter_state(room_state_delta_id_map).values() ) @@ -1108,10 +1133,13 @@ class SlidingSyncHandler: # sensible order again. 
             bump_stamp = 0

-        unstable_expanded_timeline = False
-        prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
+        room_sync_required_state_map_to_persist = room_sync_config.required_state_map
+        if changed_required_state_map:
+            room_sync_required_state_map_to_persist = changed_required_state_map
+
         # Record the `room_sync_config` if we're `ignore_timeline_bound` (which means
         # that the `timeline_limit` has increased)
+        unstable_expanded_timeline = False
         if ignore_timeline_bound:
             # FIXME: We signal the fact that we're sending down more events to
             # the client by setting `unstable_expanded_timeline` to true (see
@@ -1120,7 +1148,7 @@ class SlidingSyncHandler:

             new_connection_state.room_configs[room_id] = RoomSyncConfig(
                 timeline_limit=room_sync_config.timeline_limit,
-                required_state_map=room_sync_config.required_state_map,
+                required_state_map=room_sync_required_state_map_to_persist,
             )
         elif prev_room_sync_config is not None:
             # If the result is `limited` then we need to record that the
@@ -1149,10 +1177,14 @@ class SlidingSyncHandler:
             ):
                 new_connection_state.room_configs[room_id] = RoomSyncConfig(
                     timeline_limit=room_sync_config.timeline_limit,
-                    required_state_map=room_sync_config.required_state_map,
+                    required_state_map=room_sync_required_state_map_to_persist,
                 )

-            # TODO: Record changes in required_state.
+        elif changed_required_state_map is not None:
+            new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=room_sync_config.timeline_limit,
+                required_state_map=room_sync_required_state_map_to_persist,
+            )

         else:
             new_connection_state.room_configs[room_id] = room_sync_config
@@ -1285,3 +1317,185 @@ class SlidingSyncHandler:
                 return new_bump_event_pos.stream

         return None
+
+
+def _required_state_changes(
+    user_id: str,
+    *,
+    previous_room_config: "RoomSyncConfig",
+    room_sync_config: RoomSyncConfig,
+    state_deltas: StateMap[str],
+) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
+    """Calculates the changes to the required state room config between the
+    previous request and the current request.
+
+    This does two things. First, it calculates if we need to update the room
+    config due to changes to required state. Secondly, it works out which state
+    entries we need to pull from current state and return because they now
+    appear in the required state when they previously didn't (on top of the
+    state deltas).
+
+    This function takes care to handle the case where a state entry is
+    added, removed and then added again to the required state. In that case we
+    only want to re-send that entry down sync if it has changed.
+
+    Returns:
+        A 2-tuple of updated required state config (or None if there is no update)
+        and the state filter to use to fetch extra current state that we need to
+        return.
+    """
+
+    prev_required_state_map = previous_room_config.required_state_map
+    request_required_state_map = room_sync_config.required_state_map
+
+    if prev_required_state_map == request_required_state_map:
+        # There has been no change. Return immediately.
+        return None, StateFilter.none()
+
+    prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
+    request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
+
+    # If we were previously fetching everything ("*", "*"), always update the effective
+    # room required state config to match the request. And since we were previously
+    # already fetching everything, we don't have to fetch anything now that they've
+    # narrowed.
+    if StateValues.WILDCARD in prev_wildcard:
+        return request_required_state_map, StateFilter.none()
+
+    # If an event type wildcard has been added or removed we don't try and do
+    # anything fancy, and instead always update the effective room required
+    # state config to match the request.
+    if request_wildcard - prev_wildcard:
+        # Some keys were added, so we need to fetch everything
+        return request_required_state_map, StateFilter.all()
+    if prev_wildcard - request_wildcard:
+        # Keys were only removed, so we don't have to fetch everything.
+        return request_required_state_map, StateFilter.none()
+
+    # Contains updates to the required state map compared with the previous room
+    # config. This has the same format as `RoomSyncConfig.required_state`
+    changes: Dict[str, AbstractSet[str]] = {}
+
+    # The set of types/state keys that we need to fetch and return to the
+    # client. Passed to `StateFilter.from_types(...)`
+    added: List[Tuple[str, Optional[str]]] = []
+
+    # First we calculate what, if anything, has been *added*.
+    for event_type in (
+        prev_required_state_map.keys() | request_required_state_map.keys()
+    ):
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change to this type
+            continue
+
+        if not request_state_keys - old_state_keys:
+            # Nothing *added*, so we skip. Removals happen below.
+            continue
+
+        # Always update changes to include the newly added keys
+        changes[event_type] = request_state_keys
+
+        if StateValues.WILDCARD in old_state_keys:
+            # We were previously fetching everything for this type, so we don't need to
+            # fetch anything new.
+            continue
+
+        # Record the new state keys to fetch for this type.
+        if StateValues.WILDCARD in request_state_keys:
+            # If we have added a wildcard then we always just fetch everything.
+            added.append((event_type, None))
+        else:
+            for state_key in request_state_keys - old_state_keys:
+                if state_key == StateValues.ME:
+                    added.append((event_type, user_id))
+                elif state_key == StateValues.LAZY:
+                    # We handle lazy loading separately (outside this function),
+                    # so don't need to explicitly add anything here.
+                    #
+                    # LAZY values should also be ignored for event types that are
+                    # not membership.
+                    pass
+                else:
+                    added.append((event_type, state_key))
+
+    added_state_filter = StateFilter.from_types(added)
+
+    # Convert the list of state deltas to a map from type to state_keys that have
+    # changed.
+    changed_types_to_state_keys: Dict[str, Set[str]] = {}
+    for event_type, state_key in state_deltas:
+        changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
+
+    # Figure out what changes we need to apply to the effective required state
+    # config.
+    for event_type, changed_state_keys in changed_types_to_state_keys.items():
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change.
+            continue
+
+        if request_state_keys - old_state_keys:
+            # We've expanded the set of state keys, so we just clobber the
+            # current set with the new set.
+            #
+            # We could also ensure that we keep entries where the state hasn't
+            # changed, but are no longer in the requested required state, but
+            # that's a sufficient edge case that we can ignore (as it's only a
+            # performance optimization).
+            changes[event_type] = request_state_keys
+            continue
+
+        old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
+        request_state_key_wildcard = StateValues.WILDCARD in request_state_keys
+
+        if old_state_key_wildcard != request_state_key_wildcard:
+            # If a state_key wildcard has been added or removed, we always update the
+            # effective room required state config to match the request.
+            changes[event_type] = request_state_keys
+            continue
+
+        if event_type == EventTypes.Member:
+            old_state_key_lazy = StateValues.LAZY in old_state_keys
+            request_state_key_lazy = StateValues.LAZY in request_state_keys
+
+            if old_state_key_lazy != request_state_key_lazy:
+                # If a "$LAZY" has been added or removed we always update the effective room
+                # required state config to match the request.
+                changes[event_type] = request_state_keys
+                continue
+
+        # Handle "$ME" values by adding "$ME" if the state key matches the user
+        # ID.
+        if user_id in changed_state_keys:
+            changed_state_keys.add(StateValues.ME)
+
+        # At this point there are no wildcards and no additions to the set of
+        # state keys requested, only deletions.
+        #
+        # We only remove state keys from the effective state if they've been
+        # removed from the request *and* the state has changed. This ensures
+        # that if a client removes and then re-adds a state key, we only send
+        # down the associated current state event if it's changed (rather than
+        # sending down the same event twice).
+        invalidated = (old_state_keys - request_state_keys) & changed_state_keys
+        if invalidated:
+            changes[event_type] = old_state_keys - invalidated
+
+    if changes:
+        # Update the required state config based on the changes.
+        new_required_state_map = dict(prev_required_state_map)
+        for event_type, state_keys in changes.items():
+            if state_keys:
+                new_required_state_map[event_type] = state_keys
+            else:
+                # Remove entries with empty state keys.
+                new_required_state_map.pop(event_type, None)
+
+        return new_required_state_map, added_state_filter
+    else:
+        return None, added_state_filter

diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py
index f2df37fec1..7b357c1ffe 100644
--- a/synapse/storage/databases/main/sliding_sync.py
+++ b/synapse/storage/databases/main/sliding_sync.py
@@ -386,8 +386,8 @@ class SlidingSyncStore(SQLBaseStore):
         required_state_map: Dict[int, Dict[str, Set[str]]] = {}
         for row in rows:
             state = required_state_map[row[0]] = {}
-            for event_type, state_keys in db_to_json(row[1]):
-                state[event_type] = set(state_keys)
+            for event_type, state_key in db_to_json(row[1]):
+                state.setdefault(event_type, set()).add(state_key)

         # Get all the room configs, looking up the required state from the map
         # above.
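As an aside on the one-line storage fix just above: assuming the serialized required-state column holds a JSON list of `(event_type, state_key)` pairs (which is what the corrected loop implies), the old reconstruction both clobbered earlier pairs for the same event type and splatted the state key string into a set of its characters. A minimal standalone demonstration (the sample data here is illustrative, not Synapse's):

```python
pairs = [("m.room.member", "@a:test"), ("m.room.member", "@b:test")]

# Old reconstruction: each pair overwrites the previous entry for its
# type, and set() over a string yields its individual characters.
old: dict = {}
for event_type, state_keys in pairs:
    old[event_type] = set(state_keys)
assert old == {"m.room.member": {"@", "b", ":", "t", "e", "s"}}

# Fixed reconstruction: accumulate one set of state keys per event type.
fixed: dict = {}
for event_type, state_key in pairs:
    fixed.setdefault(event_type, set()).add(state_key)
assert fixed == {"m.room.member": {"@a:test", "@b:test"}}
```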
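The core bookkeeping of the new `_required_state_changes` helper can also be modelled in isolation. The following is a rough sketch under stated assumptions: a hypothetical `diff_required_state` that ignores the `*`, `$LAZY` and `$ME` special values the real function handles. Newly requested entries must be fetched from current state (deltas alone would miss them), while entries removed from the request are only forgotten when the underlying state has actually changed, so a remove-then-re-add cycle doesn't resend identical state:

```python
from typing import Dict, Set, Tuple

StateKey = Tuple[str, str]  # (event_type, state_key)


def diff_required_state(
    prev: Dict[str, Set[str]],
    request: Dict[str, Set[str]],
    changed: Set[StateKey],
) -> Tuple[Dict[str, Set[str]], Set[StateKey]]:
    """Return (config to persist, extra state entries to fetch now)."""
    to_fetch: Set[StateKey] = set()
    persisted: Dict[str, Set[str]] = {}
    for ev_type in prev.keys() | request.keys():
        old_keys = prev.get(ev_type, set())
        new_keys = request.get(ev_type, set())
        # Newly requested entries must be pulled from *current* state.
        to_fetch |= {(ev_type, sk) for sk in new_keys - old_keys}
        # Entries dropped from the request are only forgotten if the
        # underlying state changed; otherwise we remember having sent
        # them, so re-adding them later doesn't resend identical state.
        invalidated = {
            sk for sk in old_keys - new_keys if (ev_type, sk) in changed
        }
        keep = (old_keys - invalidated) | new_keys
        if keep:
            persisted[ev_type] = keep
    return persisted, to_fetch


prev = {"m.room.create": {""}, "m.room.name": {""}}
request = {"m.room.create": {""}}  # client stopped asking for the name

# Name unchanged: keep remembering we sent it, so a re-add won't refetch.
cfg, fetch = diff_required_state(prev, request, changed=set())
assert cfg == {"m.room.create": {""}, "m.room.name": {""}} and fetch == set()

# Name changed meanwhile: forget it, so a re-add will fetch the new one.
cfg, fetch = diff_required_state(prev, request, {("m.room.name", "")})
assert cfg == {"m.room.create": {""}} and fetch == set()
```

This remove/re-add deduplication is the behaviour the `test_rooms_required_state_expand*` tests further down exercise end-to-end.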
diff --git a/synapse/types/state.py b/synapse/types/state.py index 1141c4b5c1..67d1c3fe97 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -616,6 +616,13 @@ class StateFilter: return False + def __bool__(self) -> bool: + """Returns true if this state filter will match any state, or false if + this is the empty filter""" + if self.include_others: + return True + return bool(self.types) + _ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True) _ALL_NON_MEMBER_STATE_FILTER = StateFilter( diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index e2c7a94ce2..9a68d1dd95 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,9 +18,10 @@ # # import logging -from typing import AbstractSet, Dict, Optional, Tuple +from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple from unittest.mock import patch +import attr from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor @@ -35,15 +36,18 @@ from synapse.handlers.sliding_sync import ( RoomsForUserType, RoomSyncConfig, StateValues, + _required_state_changes, ) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, StreamToken, UserID +from synapse.types import JsonDict, StateMap, StreamToken, UserID from synapse.types.handlers.sliding_sync import SlidingSyncConfig +from synapse.types.state import StateFilter from synapse.util import Clock +from tests import unittest from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.unittest import HomeserverTestCase, TestCase @@ -3213,3 +3217,689 @@ class SortRoomsTestCase(HomeserverTestCase): # We only care about the *latest* event in the room. [room_id1, room_id2], ) + + +@attr.s(slots=True, auto_attribs=True, frozen=True) +class RequiredStateChangesTestParameters: + previous_required_state_map: Dict[str, Set[str]] + request_required_state_map: Dict[str, Set[str]] + state_deltas: StateMap[str] + expected_with_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + expected_without_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + + +class RequiredStateChangesTestCase(unittest.TestCase): + """Test cases for `_required_state_changes`""" + + @parameterized.expand( + [ + ( + "simple_no_change", + """Test no change to required state""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type1", "state_key"): "$event_id"}, + # No changes + expected_with_state_deltas=(None, StateFilter.none()), + expected_without_state_deltas=(None, StateFilter.none()), + ), + ), + ( + "simple_add_type", + """Test adding a type to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. 
+ {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new type added + StateFilter.from_types([("type2", "state_key")]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types([("type2", "state_key")]), + ), + ), + ), + ( + "simple_add_type_from_nothing", + """Test adding a type to the config when previously requesting nothing""", + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new types added + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + ), + ), + ( + "simple_add_state_key", + """Test adding a state key to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1"}}, + request_required_state_map={"type": {"state_key1", "state_key2"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # We've added a key so we should persist the changed required state + # config. + {"type": {"state_key1", "state_key2"}}, + # We should see the new state_keys added + StateFilter.from_types([("type", "state_key2")]), + ), + expected_without_state_deltas=( + {"type": {"state_key1", "state_key2"}}, + StateFilter.from_types([("type", "state_key2")]), + ), + ), + ), + ( + "simple_remove_type", + """ + Test removing a type from the config when there are a matching state + delta does cause the persisted required state config to change + + Test removing a type from the config when there are no matching state + deltas does *not* cause the persisted required state config to change + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type2` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type2`, we see that we haven't sent it before + # and send the new state. (we should still keep track that we've + # sent `type1` before). + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type2` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type2` before). 
+ None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "simple_remove_type_to_nothing", + """ + Test removing a type from the config and no longer requesting any state + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + request_required_state_map={}, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type2` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type2`, we see that we haven't sent it before + # and send the new state. (we should still keep track that we've + # sent `type1` before). + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type2` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type2` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "simple_remove_state_key", + """ + Test removing a state_key from the config + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1", "state_key2"}}, + request_required_state_map={"type": {"state_key1"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # Remove `(type, state_key2)` since there's been a change + # to that state (persist the change to required state). + # That way next time, they request `(type, state_key2)`, we see + # that we haven't sent it before and send the new state. (we + # should still keep track that we've sent `(type, state_key1)` + # before). + {"type": {"state_key1"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `(type, state_key2)` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `(type, state_key1)` and `(type, + # state_key2)` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "type_wildcards_add", + """ + Test adding a wildcard type causes the persisted required state config + to change and we request everything. + + If a event type wildcard has been added or removed we don't try and do + anything fancy, and instead always update the effective room required + state config to match the request. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key2"}}, + request_required_state_map={ + "type1": {"state_key2"}, + StateValues.WILDCARD: {"state_key"}, + }, + state_deltas={ + ("other_type", "state_key"): "$event_id", + }, + # We've added a wildcard, so we persist the change and request everything + expected_with_state_deltas=( + {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}}, + StateFilter.all(), + ), + expected_without_state_deltas=( + {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}}, + StateFilter.all(), + ), + ), + ), + ( + "type_wildcards_remove", + """ + Test removing a wildcard type causes the persisted required state config + to change and request nothing. 
+ + If a event type wildcard has been added or removed we don't try and do + anything fancy, and instead always update the effective room required + state config to match the request. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key2"}, + StateValues.WILDCARD: {"state_key"}, + }, + request_required_state_map={"type1": {"state_key2"}}, + state_deltas={ + ("other_type", "state_key"): "$event_id", + }, + # We've removed a type wildcard, so we persist the change but don't request anything + expected_with_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_wildcards_add", + """Test adding a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've added a wildcard state_key, so we persist the change and + # request all of the state for that type + expected_with_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + ), + ), + ( + "state_key_wildcards_remove", + """Test removing a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've removed a state_key wildcard, so we persist the change and + # request nothing + expected_with_state_deltas=( + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + # We've removed a state_key wildcard but there have been no matching + # state changes, so no changes needed, just persist the + # `request_required_state_map` as-is. + expected_without_state_deltas=( + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_remove_some", + """ + Test that removing state keys work when only some of the state keys have + changed + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {"state_key1"}}, + state_deltas={("type1", "state_key3"): "$event_id"}, + expected_with_state_deltas=( + # We've removed some state keys from the type, but only state_key3 was + # changed so only that one should be removed. 
+ {"type1": {"state_key1", "state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # No changes needed, just persist the + # `request_required_state_map` as-is + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_me_add", + """ + Test adding state keys work when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {StateValues.ME}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {StateValues.ME}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {StateValues.ME}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test removing state keys work when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.ME}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track that). + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_user_id_add", + """ + Test adding state keys work when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {"@user:test"}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"@user:test"}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {"@user:test"}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test removing state keys work when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"@user:test"}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track that). 
+ {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_add", + """ + Test adding state keys work when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + expected_without_state_deltas=( + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_remove", + """ + Test removing state keys work when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + request_required_state_map={}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `EventTypes.Member` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "type_wildcard_with_state_key_wildcard_to_explicit_state_keys", + """ + Test switching from a wildcard ("*", "*") to explicit state keys + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + request_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If we were previously fetching everything ("*", "*"), always update the effective + # room required state config to match the request. And since we we're previously + # already fetching everything, we don't have to fetch anything now that they've + # narrowed. 
+ expected_with_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "type_wildcard_with_explicit_state_keys_to_wildcard_state_key", + """ + Test switching from explicit to wildcard state keys ("*", "*") + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # We've added a wildcard, so we persist the change and request everything + expected_with_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + expected_without_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + ), + ), + ( + "state_key_wildcard_to_explicit_state_keys", + """Test switching from a wildcard to explicit state keys with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.WILDCARD}}, + request_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And since we we're previously already fetching + # everything, we don't have to fetch anything now that they've + # narrowed. + expected_with_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "state_key_wildcard_to_explicit_state_keys", + """Test switching from a wildcard to explicit state keys with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {StateValues.WILDCARD}}, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And we need to request all of the state for that type + # because we previously, only sent down a few keys. 
+ expected_with_state_deltas=( + {"type1": {StateValues.WILDCARD}}, + StateFilter.from_types([("type1", None)]), + ), + expected_without_state_deltas=( + {"type1": {StateValues.WILDCARD}}, + StateFilter.from_types([("type1", None)]), + ), + ), + ), + ] + ) + def test_xxx( + self, + _test_label: str, + _test_description: str, + test_parameters: RequiredStateChangesTestParameters, + ) -> None: + # Without `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + previous_room_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.previous_required_state_map, + ), + room_sync_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.request_required_state_map, + ), + state_deltas={}, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_without_state_deltas[0], + "changed_required_state_map does not match (without state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_without_state_deltas[1], + "added_state_filter does not match (without state_deltas)", + ) + + # With `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + previous_room_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.previous_required_state_map, + ), + room_sync_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.request_required_state_map, + ), + state_deltas=test_parameters.state_deltas, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_with_state_deltas[0], + "changed_required_state_map does not match (with state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_with_state_deltas[1], + "added_state_filter does not match (with state_deltas)", + ) diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 91ac6c5a0e..7da51d4954 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -862,3 +862,264 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): exact=True, message=f"Expected only fully-stated rooms to show up for test_key={list_key}.", ) + + def test_rooms_required_state_expand(self) -> None: + """Test that when we expand the required state argument we get the + expanded state, and not just the changes to the new expanded.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. 
+ self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # We should not see any state changes. + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + def test_rooms_required_state_expand_retract_expand(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we get the changes that happened.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Update the room name + self.helper.send_state( + room_id1, "m.room.name", {"name": "Bar"}, state_key="", tok=user1_tok + ) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the updated room name in state (though it will be in + # the timeline). + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the *new* room name, even though there haven't been any + # changes. 
+ state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + def test_rooms_required_state_expand_deduplicate(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we don't get the state down again if it hasn't changed""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see any state updates + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the room name again, as we have already sent that + # down. + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) From 5d47138b4677658da2b52d24f9e85b4742f6dc1d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:34:55 +0100 Subject: [PATCH 324/332] Fix typo in `target_cache_memory_usage` docs (#17825) --- changelog.d/17825.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17825.doc diff --git a/changelog.d/17825.doc b/changelog.d/17825.doc new file mode 100644 index 0000000000..ee43667417 --- /dev/null +++ b/changelog.d/17825.doc @@ -0,0 +1 @@ +Fix typo in `target_cache_memory_usage` docs. 
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 1de2f68865..47e3ef1287 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1434,7 +1434,7 @@ number of entries that can be stored. Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry durations. * `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted. - They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in + They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option. * `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value for this option. From adda2a4613cb67d4329681e5c5eb7867a17e021d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 14 Oct 2024 07:47:35 -0500 Subject: [PATCH 325/332] Sliding Sync: Slight optimization when fetching state for the room (`get_events_as_list(...)`) (#17718) Spawning from @kegsay [pointing out](https://matrix.to/#/!cnVVNLKqgUzNTOFQkz:matrix.org/$ExOO7J8uPUQSyH-9Uxc_QCa8jlXX9uK4VRtkSC0EI3o?via=element.io&via=matrix.org&via=jki.re) that the Sliding Sync endpoint doesn't handle a large room with a lot of state well on initial sync (requesting all state via `required_state: [ ["*","*"] ]`) (it just takes forever). After investigating further, the slow part is just `get_events_as_list(...)` fetching all of the current state ID's out for the room (which can be 100k+ events for rooms with a lot of membership). This is just a slow thing in Synapse in general and the same thing happens in Sync v2 or the `/state` endpoint. --- The only idea I had to improve things was to use `batch_iter` to only try fetching a fixed amount at a time instead of working with large maps, lists, and sets. This doesn't seem to have much effect though. There is already a `batch_iter(event_ids, 200)` in `_fetch_event_rows(...)` for when we actually have to touch the database and that's inside a queue to deduplicate work. I did notice one slight optimization to use `get_events_as_list(...)` directly instead of `get_events(...)`. `get_events(...)` just turns the result from `get_events_as_list(...)` into a dict and since we're just iterating over the events, we don't need the dict/map. --- changelog.d/17718.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 8 ++- .../storage/databases/main/events_worker.py | 41 ++++++++++++++-- .../databases/main/test_events_worker.py | 49 ++++++++++++++++++- 4 files changed, 89 insertions(+), 10 deletions(-) create mode 100644 changelog.d/17718.misc diff --git a/changelog.d/17718.misc b/changelog.d/17718.misc new file mode 100644 index 0000000000..ea73a03f53 --- /dev/null +++ b/changelog.d/17718.misc @@ -0,0 +1 @@ +Slight optimization when fetching state/events for Sliding Sync. 
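To make the shape of this optimization concrete, here is a condensed, non-authoritative sketch of the before/after pattern from the diff below. Types are simplified and `store` stands in for Synapse's `EventsWorkerStore`; this is an illustration of the change, not the handler code itself:

```python
from typing import Any, Dict, Tuple

StateKey = Tuple[str, str]  # (event type, state key)


async def state_map_before(store: Any, state_ids: Dict[StateKey, str]) -> Dict[StateKey, Any]:
    # Old shape: get_events() builds an event_id -> event dict that is
    # immediately picked apart again below.
    event_map = await store.get_events(list(state_ids.values()))
    state_map: Dict[StateKey, Any] = {}
    for key, event_id in state_ids.items():
        event = event_map.get(event_id)
        if event:
            state_map[key] = event
    return state_map


async def state_map_after(store: Any, state_ids: Dict[StateKey, str]) -> Dict[StateKey, Any]:
    # New shape: get_events_as_list() skips the intermediate dict; the
    # state key is recovered from the event itself.
    events = await store.get_events_as_list(list(state_ids.values()))
    return {(event.type, event.state_key): event for event in events}
```

The list-returning variant also handles missing events naturally: events that could not be fetched are simply absent from the list, so no `None` check is needed.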
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 39dba4ff98..a1a6728fb9 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -452,13 +452,11 @@ class SlidingSyncHandler: to_token=to_token, ) - event_map = await self.store.get_events(list(state_ids.values())) + events = await self.store.get_events_as_list(list(state_ids.values())) state_map = {} - for key, event_id in state_ids.items(): - event = event_map.get(event_id) - if event: - state_map[key] = event + for event in events: + state_map[(event.type, event.state_key)] = event return state_map diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c029228422..403407068c 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -61,7 +61,13 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) -from synapse.logging.opentracing import start_active_span, tag_args, trace +from synapse.logging.opentracing import ( + SynapseTags, + set_tag, + start_active_span, + tag_args, + trace, +) from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, @@ -525,6 +531,7 @@ class EventsWorkerStore(SQLBaseStore): return event + @trace async def get_events( self, event_ids: Collection[str], @@ -556,6 +563,11 @@ class EventsWorkerStore(SQLBaseStore): Returns: A mapping from event_id to event. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + events = await self.get_events_as_list( event_ids, redact_behaviour=redact_behaviour, @@ -603,6 +615,10 @@ class EventsWorkerStore(SQLBaseStore): Note that the returned list may be smaller than the list of event IDs if not all events could be fetched. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) if not event_ids: return [] @@ -723,10 +739,11 @@ class EventsWorkerStore(SQLBaseStore): return events + @trace @cancellable async def get_unredacted_events_from_cache_or_db( self, - event_ids: Iterable[str], + event_ids: Collection[str], allow_rejected: bool = False, ) -> Dict[str, EventCacheEntry]: """Fetch a bunch of events from the cache or the database. @@ -748,6 +765,11 @@ class EventsWorkerStore(SQLBaseStore): Returns: map from event id to result """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + # Shortcut: check if we have any events in the *in memory* cache - this function # may be called repeatedly for the same event so at this point we cannot reach # out to any external cache for performance reasons. The external cache is @@ -936,7 +958,7 @@ class EventsWorkerStore(SQLBaseStore): events, update_metrics=update_metrics ) - missing_event_ids = (e for e in events if e not in event_map) + missing_event_ids = [e for e in events if e not in event_map] event_map.update( await self._get_events_from_external_cache( events=missing_event_ids, @@ -946,8 +968,9 @@ class EventsWorkerStore(SQLBaseStore): return event_map + @trace async def _get_events_from_external_cache( - self, events: Iterable[str], update_metrics: bool = True + self, events: Collection[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: """Fetch events from any configured external cache. 
@@ -957,6 +980,10 @@ class EventsWorkerStore(SQLBaseStore): events: list of event_ids to fetch update_metrics: Whether to update the cache hit ratio metrics """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "events.length", + str(len(events)), + ) event_map = {} for event_id in events: @@ -1222,6 +1249,7 @@ class EventsWorkerStore(SQLBaseStore): with PreserveLoggingContext(): self.hs.get_reactor().callFromThread(fire_errback, e) + @trace async def _get_events_from_db( self, event_ids: Collection[str] ) -> Dict[str, EventCacheEntry]: @@ -1240,6 +1268,11 @@ class EventsWorkerStore(SQLBaseStore): map from event id to result. May return extra events which weren't asked for. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + fetched_event_ids: Set[str] = set() fetched_events: Dict[str, _EventRow] = {} diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index fd1f5e7fd5..104d141a72 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -20,7 +20,7 @@ # import json from contextlib import contextmanager -from typing import Generator, List, Tuple +from typing import Generator, List, Set, Tuple from unittest import mock from twisted.enterprise.adbapi import ConnectionPool @@ -295,6 +295,53 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) +class GetEventsTestCase(unittest.HomeserverTestCase): + """Test `get_events(...)`/`get_events_as_list(...)`""" + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store: EventsWorkerStore = hs.get_datastores().main + + def test_get_lots_of_messages(self) -> None: + """Sanity check that `get_events(...)`/`get_events_as_list(...)` works""" + num_events = 100 + + user_id = self.register_user("user", "pass") + user_tok = self.login(user_id, "pass") + + room_id = self.helper.create_room_as(user_id, tok=user_tok) + + event_ids: Set[str] = set() + for i in range(num_events): + event = self.get_success( + inject_event( + self.hs, + room_id=room_id, + type="m.room.message", + sender=user_id, + content={ + "body": f"foo{i}", + "msgtype": "m.text", + }, + ) + ) + event_ids.add(event.event_id) + + # Sanity check that we actually created the events + self.assertEqual(len(event_ids), num_events) + + # This is the function under test + fetched_event_map = self.get_success(self.store.get_events(event_ids)) + + # Sanity check that we got the events back + self.assertIncludes(fetched_event_map.keys(), event_ids, exact=True) + + class DatabaseOutageTestCase(unittest.HomeserverTestCase): """Test event fetching during a database outage.""" From c5b379de660927f64bf0952c1c6ce5a497ad6aea Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:49:43 +0100 Subject: [PATCH 326/332] Enable the `.org.matrix.msc4028.encrypted_event` push rule by default (#17826) Clients will still only see this rule if the corresponding experimental feature, `msc4028_push_encrypted_events`, is also enabled. This aligns the implementation with MSC4028, specifically [this section](https://github.com/matrix-org/matrix-spec-proposals/blob/giomfo/push_encrypted_events/proposals/4028-push-all-encrypted-events-except-for-muted-rooms.md#unstable-prefix). 
See https://github.com/element-hq/synapse/issues/16846 for context. --- changelog.d/17826.misc | 1 + rust/src/push/base_rules.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17826.misc diff --git a/changelog.d/17826.misc b/changelog.d/17826.misc new file mode 100644 index 0000000000..9148c96a0d --- /dev/null +++ b/changelog.d/17826.misc @@ -0,0 +1 @@ +Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. \ No newline at end of file diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 74f02d6001..e0832ada1c 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ ))]), actions: Cow::Borrowed(&[Action::Notify]), default: true, - default_enabled: false, + default_enabled: true, }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), From 11bc9a1b3ae21bd2041bf7cc4b93dd70a8c3b93e Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Mon, 14 Oct 2024 15:24:28 +0200 Subject: [PATCH 327/332] Implement MSC4210: Remove legacy mentions (#17783) --- changelog.d/17783.feature | 1 + rust/benches/evaluator.rs | 5 +++++ rust/src/push/evaluator.rs | 13 +++++++++++-- rust/src/push/mod.rs | 11 +++++++++++ synapse/config/experimental.py | 3 +++ synapse/push/bulk_push_rule_evaluator.py | 1 + synapse/storage/databases/main/push_rule.py | 1 + synapse/synapse_rust/push.pyi | 2 ++ tests/push/test_push_rule_evaluator.py | 2 ++ 9 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17783.feature diff --git a/changelog.d/17783.feature b/changelog.d/17783.feature new file mode 100644 index 0000000000..ce8c216418 --- /dev/null +++ b/changelog.d/17783.feature @@ -0,0 +1 @@ +Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 4fea035b96..28537e187e 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -60,6 +60,7 @@ fn bench_match_exact(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -105,6 +106,7 @@ fn bench_match_word(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -150,6 +152,7 @@ fn bench_match_word_miss(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -195,6 +198,7 @@ fn bench_eval_message(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -205,6 +209,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 2f4b6d47bb..0d436a1d7b 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -105,6 +105,9 @@ pub struct PushRuleEvaluator { /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, + + // If MSC4210 (remove legacy mentions) is enabled. 
+ msc4210_enabled: bool, } #[pymethods] @@ -122,6 +125,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, ))] pub fn py_new( flattened_keys: BTreeMap, @@ -133,6 +137,7 @@ impl PushRuleEvaluator { related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, + msc4210_enabled: bool, ) -> Result { let body = match flattened_keys.get("content.body") { Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(), @@ -150,6 +155,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, }) } @@ -176,7 +182,8 @@ impl PushRuleEvaluator { // For backwards-compatibility the legacy mention rules are disabled // if the event contains the 'm.mentions' property. - if self.has_mentions + // Additionally, MSC4210 always disables the legacy rules. + if (self.has_mentions || self.msc4210_enabled) && (rule_id == "global/override/.m.rule.contains_display_name" || rule_id == "global/content/.m.rule.contains_user_name" || rule_id == "global/override/.m.rule.roomnotif") @@ -526,6 +533,7 @@ fn push_rule_evaluator() { true, vec![], true, + false, ) .unwrap(); @@ -555,6 +563,7 @@ fn test_requires_room_version_supports_condition() { false, flags, true, + false, ) .unwrap(); @@ -582,7 +591,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 2a452b69a3..ef8ed150d4 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -534,6 +534,7 @@ pub struct FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, } #[pymethods] @@ -546,6 +547,7 @@ impl FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, ) -> Self { Self { push_rules, @@ -554,6 +556,7 @@ impl FilteredPushRules { msc3381_polls_enabled, msc3664_enabled, msc4028_push_encrypted_events, + msc4210_enabled, } } @@ -596,6 +599,14 @@ impl FilteredPushRules { return false; } + if self.msc4210_enabled + && (rule.rule_id == "global/override/.m.rule.contains_display_name" + || rule.rule_id == "global/content/.m.rule.contains_user_name" + || rule.rule_id == "global/override/.m.rule.roomnotif") + { + return false; + } + true }) .map(|r| { diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 99185db93d..fd14db0d02 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -447,3 +447,6 @@ class ExperimentalConfig(Config): # MSC4151: Report room API (Client-Server API) self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) + + # MSC4210: Remove legacy mentions + self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 679cbe9afa..9c0592a902 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -436,6 +436,7 @@ class BulkPushRuleEvaluator: self._related_event_match_enabled, event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag + 
self.hs.config.experimental.msc4210_enabled, ) for uid, rules in rules_by_user.items(): diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index bbdde17711..86c87f78bf 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -109,6 +109,7 @@ def _load_rules( msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events, + msc4210_enabled=experimental_config.msc4210_enabled, ) return filtered_rules diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi index 27a974e1bb..3f317c3288 100644 --- a/synapse/synapse_rust/push.pyi +++ b/synapse/synapse_rust/push.pyi @@ -48,6 +48,7 @@ class FilteredPushRules: msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... @@ -65,6 +66,7 @@ class PushRuleEvaluator: related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, + msc4210_enabled: bool, ): ... def run( self, diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 420fbea998..c1f8e18bd9 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -149,6 +149,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): content: JsonMapping, *, related_events: Optional[JsonDict] = None, + msc4210: bool = False, ) -> PushRuleEvaluator: event = FrozenEvent( { @@ -174,6 +175,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, + msc4210_enabled=msc4210, ) def test_display_name(self) -> None: From ec885ffd334df29c99aaf722424d61a9e7739a1a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 15 Oct 2024 10:46:33 +0100 Subject: [PATCH 328/332] 1.117.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ad0d1043bd..ba45fe0156 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.117.0 (2024-10-15) + +No significant changes since 1.117.0rc1. + + + + # Synapse 1.117.0rc1 (2024-10-08) ### Features diff --git a/debian/changelog b/debian/changelog index aa3c638359..1995fbf6f6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.117.0) stable; urgency=medium + + * New Synapse release 1.117.0. + + -- Synapse Packaging team Tue, 15 Oct 2024 10:46:30 +0100 + matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium * New Synapse release 1.117.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index bfcf368641..078ad3bc95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.117.0rc1" +version = "1.117.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 6ececb8f2ae52c6a77c54a740aaddbf87910469d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:29:05 +0000 Subject: [PATCH 329/332] Bump psycopg2 from 2.9.9 to 2.9.10 (#17843) Bumps [psycopg2](https://github.com/psycopg/psycopg2) from 2.9.9 to 2.9.10.
Changelog

Sourced from psycopg2's changelog.

Current release

What's new in psycopg 2.9.10

  • Add support for Python 3.13.
  • Receive notifications on commit ([#1728](https://github.com/psycopg/psycopg2/issues/1728)); see the sketch after this excerpt.
  • psycopg2.errorcodes map and psycopg2.errors classes updated to PostgreSQL 17.
  • Drop support for Python 3.7.

What's new in psycopg 2.9.9

  • Add support for Python 3.12.
  • Drop support for Python 3.6.

What's new in psycopg 2.9.8

  • Wheel package bundled with PostgreSQL 16 libpq in order to add support for recent features, such as sslcertmode.

What's new in psycopg 2.9.7

  • Fix propagation of exceptions raised during module initialization ([#1598](https://github.com/psycopg/psycopg2/issues/1598)).
  • Fix building when pg_config returns an empty string ([#1599](https://github.com/psycopg/psycopg2/issues/1599)).
  • Wheel package bundled with OpenSSL 1.1.1v.

What's new in psycopg 2.9.6

  • Package manylinux 2014 for aarch64 and ppc64le platforms, in order to include libpq 15 in the binary package ([#1396](https://github.com/psycopg/psycopg2/issues/1396)).
  • Wheel package bundled with OpenSSL 1.1.1t.

What's new in psycopg 2.9.5

  • Add support for Python 3.11.
  • Add support for rowcount in MERGE statements in binary packages ([#1497](https://github.com/psycopg/psycopg2/issues/1497)).

... (truncated)
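Since the 2.9.10 bullet above mentions receiving notifications on commit, here is a minimal psycopg2 LISTEN/NOTIFY sketch for orientation. Only the basic, long-standing API is shown; the connection string is a placeholder, and the 2.9.10 change concerns *when* queued notifications are delivered, not this interface:

```python
import select

import psycopg2
import psycopg2.extensions

# Placeholder DSN; adjust for a real database.
conn = psycopg2.connect("dbname=test")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

cur = conn.cursor()
cur.execute("LISTEN my_channel;")

# Wait up to 5 seconds for the socket to become readable, then drain
# any notifications psycopg2 has queued on the connection.
if select.select([conn], [], [], 5) != ([], [], []):
    conn.poll()
    while conn.notifies:
        notify = conn.notifies.pop(0)
        print(notify.pid, notify.channel, notify.payload)
```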

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9fd95ff447..91394947d4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1595,24 +1595,20 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, - {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, - {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, - {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, - {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, - {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, - {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, - {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, - {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, + {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, ] [[package]] From 
0ab99369a1e4d4ce296dcd7b6d48f087718c5b51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:34:00 +0000 Subject: [PATCH 330/332] Bump sentry-sdk from 2.16.0 to 2.17.0 (#17844) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 2.16.0 to 2.17.0.
Release notes

Sourced from sentry-sdk's releases.

2.17.0

Various fixes & improvements

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 91394947d4..945952f782 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2331,13 +2331,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.16.0" +version = "2.17.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, - {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, + {file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"}, + {file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"}, ] [package.dependencies] From 22aa925523a7f487bf86c8e1a44607fe31a07b07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:52:18 +0000 Subject: [PATCH 331/332] Bump types-requests from 2.32.0.20240914 to 2.32.0.20241016 (#17841) Bumps [types-requests](https://github.com/python/typeshed) from 2.32.0.20240914 to 2.32.0.20241016.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 945952f782..5350c6ddfa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2817,13 +2817,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20240914" +version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] [package.dependencies] From 9512b84a727a6519a407c72764b4c5ec36323a78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:05:00 +0000 Subject: [PATCH 332/332] Bump mypy from 1.10.1 to 1.11.2 (#17842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [mypy](https://github.com/python/mypy) from 1.10.1 to 1.11.2.
Changelog

Sourced from mypy's changelog.

Mypy 1.11.2

  • Alternative fix for a union-like literal string (Ivan Levkivskyi, PR 17639)
  • Unwrap TypedDict item types before storing (Ivan Levkivskyi, PR 17640)

Acknowledgements

Thanks to all mypy contributors who contributed to this release:

  • Alex Waygood
  • Alexander Leopold Shon
  • Ali Hamdan
  • Anders Kaseorg
  • Ben Brown
  • Bénédikt Tran
  • bzoracler
  • Christoph Tyralla
  • Christopher Barber
  • dexterkennedy
  • gilesgc
  • GiorgosPapoutsakis
  • Ivan Levkivskyi
  • Jelle Zijlstra
  • Jukka Lehtosalo
  • Marc Mueller
  • Matthieu Devlin
  • Michael R. Crusoe
  • Nikita Sobolev
  • Seo Sanghyeon
  • Shantanu
  • sobolevn
  • Steven Troxler
  • Tadeu Manoel
  • Tamir Duberstein
  • Tushar Sadhwani
  • urnest
  • Valentin Stanciu

I’d also like to thank my employer, Dropbox, for supporting mypy development.

Mypy 1.10

We’ve just uploaded mypy 1.10 to the Python Package Index (PyPI). Mypy is a static type checker for Python. This release includes new features, performance improvements and bug fixes. You can install it as follows:

python3 -m pip install -U mypy

You can read the full documentation for this release on Read the Docs.

Support TypeIs (PEP 742)

Mypy now supports TypeIs (PEP 742), which allows

... (truncated)
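For orientation on the truncated TypeIs note above, a small illustration of what PEP 742 narrowing looks like under mypy 1.11+. This example is mine, not from the changelog; `TypeIs` is importable from `typing_extensions` on Pythons older than 3.13:

```python
from typing import Union

from typing_extensions import TypeIs


def is_str(value: object) -> TypeIs[str]:
    return isinstance(value, str)


def describe(value: Union[str, int]) -> str:
    if is_str(value):
        # mypy narrows `value` to str in this branch...
        return value.upper()
    # ...and, unlike TypeGuard, narrows it to int in the else branch too.
    return str(value + 1)
```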

Commits
  • 789f02c Bump version to 1.11.2
  • 917cc75 An alternative fix for a union-like literal string (#17639)
  • 7d805b3 Unwrap TypedDict item types before storing (#17640)
  • 32675dd Revert "Fix Literal strings containing pipe characters" (#17638)
  • 778542b Revert "Fix RawExpressionType.accept crash with --cache-fine-grained" (#1...
  • 14ab742 Bump version to 1.11.2+dev
  • 570b90a Bump version to 1.11
  • b3a102e Fix RawExpressionType.accept crash with --cache-fine-grained (#17588)
  • aec04c7 Fix PEP 604 isinstance caching (#17563)
  • cb44e4d Fix typing.TypeAliasType being undefined on python < 3.12 (#17558)
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- changelog.d/17842.misc | 1 + poetry.lock | 58 +++++++++++++------------- scripts-dev/mypy_synapse_plugin.py | 2 +- synmark/__main__.py | 4 ++ tests/media/test_oembed.py | 3 ++ tests/push/test_push_rule_evaluator.py | 2 + 6 files changed, 40 insertions(+), 30 deletions(-) create mode 100644 changelog.d/17842.misc diff --git a/changelog.d/17842.misc b/changelog.d/17842.misc new file mode 100644 index 0000000000..78af706c31 --- /dev/null +++ b/changelog.d/17842.misc @@ -0,0 +1 @@ +Fix some typing issues uncovered by upgrading mypy to 1.11.x. diff --git a/poetry.lock b/poetry.lock index 5350c6ddfa..42d7f03c5c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1319,44 +1319,44 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.11.2" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, + {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, + {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, + {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, + {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, + {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, + {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, + {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, + {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, + {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, + {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, + {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, + {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, + {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, + {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, + {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, + {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, + {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 509047b41b..a15c3c005c 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -360,7 +360,7 @@ def is_cacheable( # For a type alias, check if the underlying real type is cachable. return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose) - elif isinstance(rt, UninhabitedType) and rt.is_noreturn: + elif isinstance(rt, UninhabitedType): # There is no return value, just consider it cachable. This is only used # in tests. 
return True, None diff --git a/synmark/__main__.py b/synmark/__main__.py index 746261a1ec..4944c2f3b0 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -90,6 +90,10 @@ if __name__ == "__main__": if runner.args.worker: if runner.args.log: + # sys.__stdout__ can technically be None, just exit if it's the case + if not sys.__stdout__: + exit(1) + globalLogBeginner.beginLoggingTo( [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False ) diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py index 29d4580697..b8265ff9ca 100644 --- a/tests/media/test_oembed.py +++ b/tests/media/test_oembed.py @@ -20,6 +20,7 @@ # import json +from typing import Any from parameterized import parameterized @@ -52,6 +53,7 @@ class OEmbedTests(HomeserverTestCase): def test_version(self) -> None: """Accept versions that are similar to 1.0 as a string or int (or missing).""" + version: Any for version in ("1.0", 1.0, 1): result = self.parse_response({"version": version}) # An empty Open Graph response is an error, ensure the URL is included. @@ -69,6 +71,7 @@ class OEmbedTests(HomeserverTestCase): def test_cache_age(self) -> None: """Ensure a cache-age is parsed properly.""" + cache_age: Any # Correct-ish cache ages are allowed. for cache_age in ("1", 1.0, 1): result = self.parse_response({"cache_age": cache_age}) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index c1f8e18bd9..3898532acf 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -454,6 +454,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): {"value": False}, "incorrect values should not match", ) + value: Any for value in ("foobaz", 1, 1.1, None, [], {}): self._assert_not_matches( condition, @@ -494,6 +495,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): {"value": None}, "exact value should match", ) + value: Any for value in ("foobaz", True, False, 1, 1.1, [], {}): self._assert_not_matches( condition,