diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 06aaeb851f..ebf866e3d5 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -30,7 +30,7 @@ jobs:
         run: docker buildx inspect
 
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.5.0
+        uses: sigstore/cosign-installer@v3.7.0
 
       - name: Checkout repository
         uses: actions/checkout@v4
diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml
index f1e35fcd99..909b0a847f 100644
--- a/.github/workflows/fix_lint.yaml
+++ b/.github/workflows/fix_lint.yaml
@@ -29,17 +29,13 @@ jobs:
         with:
           install-project: "false"
 
-      - name: Import order (isort)
+      - name: Run ruff check
         continue-on-error: true
-        run: poetry run isort .
+        run: poetry run ruff check --fix .
 
-      - name: Code style (black)
+      - name: Run ruff format
         continue-on-error: true
-        run: poetry run black .
-
-      - name: Semantic checks (ruff)
-        continue-on-error: true
-        run: poetry run ruff --fix .
+        run: poetry run ruff format --quiet .
 
       - run: cargo clippy --all-features --fix -- -D warnings
         continue-on-error: true
@@ -49,4 +45,4 @@ jobs:
 
       - uses: stefanzweifel/git-auto-commit-action@v5
         with:
-            commit_message: "Attempt to fix linting"
+          commit_message: "Attempt to fix linting"
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 767495101b..5586bd6d94 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -131,15 +131,11 @@ jobs:
         with:
           install-project: "false"
 
-      - name: Import order (isort)
-        run: poetry run isort --check --diff .
+      - name: Run ruff check
+        run: poetry run ruff check --output-format=github .
 
-      - name: Code style (black)
-        run: poetry run black --check --diff .
-
-      - name: Semantic checks (ruff)
-        # --quiet suppresses the update check.
-        run: poetry run ruff check --quiet .
+      - name: Run ruff format
+        run: poetry run ruff format --check .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -305,7 +301,7 @@ jobs:
       - lint-readme
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
         with:
           needs: ${{ toJSON(needs) }}
 
@@ -737,7 +733,7 @@ jobs:
       - linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
         with:
           needs: ${{ toJSON(needs) }}
 
diff --git a/CHANGES.md b/CHANGES.md
index a40aa26d78..ba45fe0156 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,548 @@
+# Synapse 1.117.0 (2024-10-15)
+
+No significant changes since 1.117.0rc1.
+
+
+
+
+# Synapse 1.117.0rc1 (2024-10-08)
+
+### Features
+
+- Add config option `redis.password_path`. ([\#17717](https://github.com/element-hq/synapse/issues/17717))
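+
+  A minimal illustrative sketch of how this might look in `homeserver.yaml` (the
+  secret file path is a hypothetical placeholder):
+
+  ```yaml
+  redis:
+    enabled: true
+    # Read the Redis password from a file (hypothetical path) rather than
+    # inlining it in homeserver.yaml.
+    password_path: /run/secrets/synapse_redis_password
+  ```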
+
+### Bugfixes
+
+- Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. ([\#17779](https://github.com/element-hq/synapse/issues/17779))
+- In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140) to `false` when server configuration disables support for delayed events. ([\#17780](https://github.com/element-hq/synapse/issues/17780))
+- Improve input validation and room membership checks in the admin redaction API. ([\#17792](https://github.com/element-hq/synapse/issues/17792))
+
+### Improved Documentation
+
+- Clarify the docstring of `test_forget_when_not_left`. ([\#17628](https://github.com/element-hq/synapse/issues/17628))
+- Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg. ([\#17709](https://github.com/element-hq/synapse/issues/17709))
+- Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs. ([\#17749](https://github.com/element-hq/synapse/issues/17749))
+- Explain how load balancing works for `federation_sender_instances`. ([\#17776](https://github.com/element-hq/synapse/issues/17776))
+
+### Internal Changes
+
+- Minor performance increase for large accounts using sliding sync. ([\#17751](https://github.com/element-hq/synapse/issues/17751))
+- Increase performance of the notifier when there are many syncing users. ([\#17765](https://github.com/element-hq/synapse/issues/17765), [\#17766](https://github.com/element-hq/synapse/issues/17766))
+- Fix performance of streams that don't change often. ([\#17767](https://github.com/element-hq/synapse/issues/17767))
+- Improve performance of sliding sync connections that do not ask for any rooms. ([\#17768](https://github.com/element-hq/synapse/issues/17768))
+- Reduce overhead of sliding sync E2EE loops. ([\#17771](https://github.com/element-hq/synapse/issues/17771))
+- Minor sliding sync performance speed-up by using a new table. ([\#17787](https://github.com/element-hq/synapse/issues/17787))
+- Minor sliding sync performance improvement by omitting unchanged data from incremental responses. ([\#17788](https://github.com/element-hq/synapse/issues/17788))
+- Speed up sliding sync when there are many active subscriptions. ([\#17789](https://github.com/element-hq/synapse/issues/17789))
+- Add missing license headers on new source files. ([\#17799](https://github.com/element-hq/synapse/issues/17799))
+
+
+
+### Updates to locked dependencies
+
+* Bump phonenumbers from 8.13.45 to 8.13.46. ([\#17773](https://github.com/element-hq/synapse/issues/17773))
+* Bump python-multipart from 0.0.10 to 0.0.12. ([\#17772](https://github.com/element-hq/synapse/issues/17772))
+* Bump regex from 1.10.6 to 1.11.0. ([\#17770](https://github.com/element-hq/synapse/issues/17770))
+* Bump ruff from 0.6.7 to 0.6.8. ([\#17774](https://github.com/element-hq/synapse/issues/17774))
+
+# Synapse 1.116.0 (2024-10-01)
+
+No significant changes since 1.116.0rc2.
+
+
+
+
+# Synapse 1.116.0rc2 (2024-09-26)
+
+### Features
+
+- Add implementation of restricting who can overwrite a state event as proposed by [MSC3757](https://github.com/matrix-org/matrix-spec-proposals/pull/3757). ([\#17513](https://github.com/element-hq/synapse/issues/17513))
+
+
+
+
+# Synapse 1.116.0rc1 (2024-09-25)
+
+### Features
+
+- Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). ([\#17326](https://github.com/element-hq/synapse/issues/17326))
+- Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user),
+  and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process). ([\#17506](https://github.com/element-hq/synapse/issues/17506))
+- Add support for the `tags` and `not_tags` filters for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17662](https://github.com/element-hq/synapse/issues/17662))
+- Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). ([\#17675](https://github.com/element-hq/synapse/issues/17675))
+- Add config option `turn_shared_secret_path` (a brief example is sketched just after this list). ([\#17690](https://github.com/element-hq/synapse/issues/17690))
+- Return room tags in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync account data extension. ([\#17707](https://github.com/element-hq/synapse/issues/17707))
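+
+As a rough, illustrative sketch of the `turn_shared_secret_path` option above (the
+TURN URI and file path are hypothetical placeholders):
+
+```yaml
+turn_uris: ["turn:turn.example.com?transport=udp"]
+# Read the TURN shared secret from a file (hypothetical path) instead of
+# setting `turn_shared_secret` inline.
+turn_shared_secret_path: /run/secrets/synapse_turn_shared_secret
+```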
+
+### Bugfixes
+
+- Make sure we get up-to-date state information when using the new [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables to derive room membership. ([\#17692](https://github.com/element-hq/synapse/issues/17692))
+- Fix bug where room account data would not correctly be sent down [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync for old rooms. ([\#17695](https://github.com/element-hq/synapse/issues/17695))
+- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync which could prevent /sync from working for certain user accounts. ([\#17727](https://github.com/element-hq/synapse/issues/17727), [\#17733](https://github.com/element-hq/synapse/issues/17733))
+- Ignore invites from ignored users in Sliding Sync. ([\#17729](https://github.com/element-hq/synapse/issues/17729))
+- Fix bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. ([\#17748](https://github.com/element-hq/synapse/issues/17748))
+
+### Internal Changes
+
+- Import pydantic objects from the `_pydantic_compat` module.
+  This allows `check_pydantic_models.py` to mock those pydantic objects
+  only in the synapse module, and not interfere with pydantic objects in
+  external dependencies. ([\#17667](https://github.com/element-hq/synapse/issues/17667))
+- Use [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. ([\#17693](https://github.com/element-hq/synapse/issues/17693))
+- Speed up [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests a bit where there are many room changes. ([\#17696](https://github.com/element-hq/synapse/issues/17696))
+- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync filter unit tests so the sliding sync API has better test coverage. ([\#17703](https://github.com/element-hq/synapse/issues/17703))
+- Fetch `bump_stamp`s more efficiently in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17723](https://github.com/element-hq/synapse/issues/17723))
+- Shortcut for checking if certain background updates have completed (utilized in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync). ([\#17724](https://github.com/element-hq/synapse/issues/17724))
+- More efficiently fetch rooms for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17725](https://github.com/element-hq/synapse/issues/17725))
+- Fix `_bulk_get_max_event_pos` being inefficient. ([\#17728](https://github.com/element-hq/synapse/issues/17728))
+- Add cache to `get_tags_for_room(...)`. ([\#17730](https://github.com/element-hq/synapse/issues/17730))
+- Small performance improvement to [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17731](https://github.com/element-hq/synapse/issues/17731))
+- Minor speed up of initial [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests. ([\#17734](https://github.com/element-hq/synapse/issues/17734))
+- Remove usage of the deprecated `cgi` module, deprecated in Python 3.11 and removed in Python 3.13. ([\#17741](https://github.com/element-hq/synapse/issues/17741))
+- Fix typing of a variable that is not `Unknown` anymore after updating `treq`. ([\#17744](https://github.com/element-hq/synapse/issues/17744))
+
+
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.86 to 1.0.89. ([\#17685](https://github.com/element-hq/synapse/issues/17685), [\#17716](https://github.com/element-hq/synapse/issues/17716))
+* Bump bytes from 1.7.1 to 1.7.2. ([\#17743](https://github.com/element-hq/synapse/issues/17743))
+* Bump cryptography from 43.0.0 to 43.0.1. ([\#17689](https://github.com/element-hq/synapse/issues/17689))
+* Bump idna from 3.8 to 3.10. ([\#17758](https://github.com/element-hq/synapse/issues/17758))
+* Bump msgpack from 1.0.8 to 1.1.0. ([\#17759](https://github.com/element-hq/synapse/issues/17759))
+* Bump phonenumbers from 8.13.44 to 8.13.45. ([\#17762](https://github.com/element-hq/synapse/issues/17762))
+* Bump prometheus-client from 0.20.0 to 0.21.0. ([\#17746](https://github.com/element-hq/synapse/issues/17746))
+* Bump pyasn1 from 0.6.0 to 0.6.1. ([\#17714](https://github.com/element-hq/synapse/issues/17714))
+* Bump pyasn1-modules from 0.4.0 to 0.4.1. ([\#17747](https://github.com/element-hq/synapse/issues/17747))
+* Bump pydantic from 2.8.2 to 2.9.2. ([\#17756](https://github.com/element-hq/synapse/issues/17756))
+* Bump python-multipart from 0.0.9 to 0.0.10. ([\#17745](https://github.com/element-hq/synapse/issues/17745))
+* Bump ruff from 0.6.4 to 0.6.7. ([\#17715](https://github.com/element-hq/synapse/issues/17715), [\#17760](https://github.com/element-hq/synapse/issues/17760))
+* Bump sentry-sdk from 2.13.0 to 2.14.0. ([\#17712](https://github.com/element-hq/synapse/issues/17712))
+* Bump serde from 1.0.209 to 1.0.210. ([\#17686](https://github.com/element-hq/synapse/issues/17686))
+* Bump serde_json from 1.0.127 to 1.0.128. ([\#17687](https://github.com/element-hq/synapse/issues/17687))
+* Bump treq from 23.11.0 to 24.9.1. ([\#17744](https://github.com/element-hq/synapse/issues/17744))
+* Bump types-pyyaml from 6.0.12.20240808 to 6.0.12.20240917. ([\#17755](https://github.com/element-hq/synapse/issues/17755))
+* Bump types-requests from 2.32.0.20240712 to 2.32.0.20240914. ([\#17713](https://github.com/element-hq/synapse/issues/17713))
+* Bump types-setuptools from 74.1.0.20240907 to 75.1.0.20240917. ([\#17757](https://github.com/element-hq/synapse/issues/17757))
+
+# Synapse 1.115.0 (2024-09-17)
+
+No significant changes since 1.115.0rc2.
+
+
+
+
+# Synapse 1.115.0rc2 (2024-09-12)
+
+### Internal Changes
+
+- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17652](https://github.com/element-hq/synapse/issues/17652))
+- Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. ([\#17683](https://github.com/element-hq/synapse/issues/17683))
+
+
+
+
+# Synapse 1.115.0rc1 (2024-09-10)
+
+### Features
+
+- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509))
+
+### Bugfixes
+
+- Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. ([\#17607](https://github.com/element-hq/synapse/issues/17607))
+- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626))
+- Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. ([\#17674](https://github.com/element-hq/synapse/issues/17674))
+
+### Improved Documentation
+
+- Clarify that the admin API resource is only loaded on the main process and not workers. ([\#17590](https://github.com/element-hq/synapse/issues/17590))
+- Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). ([\#17594](https://github.com/element-hq/synapse/issues/17594))
+
+### Deprecations and Removals
+
+- Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. ([\#17650](https://github.com/element-hq/synapse/issues/17650))
+
+### Internal Changes
+
+- Update [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407))
+- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673))
+- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631))
+- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600))
+- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620), [\#17643](https://github.com/element-hq/synapse/issues/17643))
+- Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. ([\#17629](https://github.com/element-hq/synapse/issues/17629))
+- Use new database tables for sliding sync. ([\#17630](https://github.com/element-hq/synapse/issues/17630), [\#17649](https://github.com/element-hq/synapse/issues/17649))
+- Prevent duplicate tags being added to Sliding Sync traces. ([\#17655](https://github.com/element-hq/synapse/issues/17655))
+- Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. ([\#17658](https://github.com/element-hq/synapse/issues/17658))
+- Speed up incremental Sliding Sync requests by avoiding extra work. ([\#17665](https://github.com/element-hq/synapse/issues/17665))
+- Small performance improvement to sliding sync. ([\#17666](https://github.com/element-hq/synapse/issues/17666), [\#17670](https://github.com/element-hq/synapse/issues/17670), [\#17672](https://github.com/element-hq/synapse/issues/17672))
+- Speed up sliding sync by reducing number of database calls. ([\#17684](https://github.com/element-hq/synapse/issues/17684))
+- Speed up sync by pulling out fewer events from the database. ([\#17688](https://github.com/element-hq/synapse/issues/17688))
+
+
+
+### Updates to locked dependencies
+
+* Bump authlib from 1.3.1 to 1.3.2. ([\#17679](https://github.com/element-hq/synapse/issues/17679))
+* Bump idna from 3.7 to 3.8. ([\#17682](https://github.com/element-hq/synapse/issues/17682))
+* Bump ruff from 0.6.2 to 0.6.4. ([\#17680](https://github.com/element-hq/synapse/issues/17680))
+* Bump towncrier from 24.7.1 to 24.8.0. ([\#17645](https://github.com/element-hq/synapse/issues/17645))
+* Bump twisted from 24.7.0rc1 to 24.7.0. ([\#17647](https://github.com/element-hq/synapse/issues/17647))
+* Bump types-pillow from 10.2.0.20240520 to 10.2.0.20240822. ([\#17644](https://github.com/element-hq/synapse/issues/17644))
+* Bump types-psycopg2 from 2.9.21.20240417 to 2.9.21.20240819. ([\#17646](https://github.com/element-hq/synapse/issues/17646))
+* Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907. ([\#17681](https://github.com/element-hq/synapse/issues/17681))
+
+# Synapse 1.114.0 (2024-09-02)
+
+This release enables support for
+[MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) —
+Simplified Sliding Sync. This allows using the upcoming releases of the Element
+X mobile apps without having to run a Sliding Sync Proxy.
+
+
+### Features
+
+- Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. ([\#17648](https://github.com/element-hq/synapse/issues/17648))
+
+
+
+
+# Synapse 1.114.0rc3 (2024-08-30)
+
+### Bugfixes
+
+- Fix regression in v1.114.0rc2 that caused workers to fail to start. ([\#17626](https://github.com/element-hq/synapse/issues/17626))
+
+
+
+
+# Synapse 1.114.0rc2 (2024-08-30)
+
+### Features
+
+- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509))
+- Make `hash_password` script accept password input from stdin. ([\#17608](https://github.com/element-hq/synapse/issues/17608))
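+
+  For example (illustrative only; the password is a placeholder, and this assumes
+  the script reads the password from standard input when it is piped rather than
+  prompting for it):
+
+  ```sh
+  # Pipe the password in (placeholder value) instead of typing it interactively.
+  echo -n 's3cret-placeholder' | hash_password
+  ```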
+
+### Bugfixes
+
+- Fix hierarchy returning 403 when a room is accessible through federation. Contributed by Krishan (@kfiven). ([\#17194](https://github.com/element-hq/synapse/issues/17194))
+- Fix content-length on federation `/thumbnail` responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532))
+- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17543](https://github.com/element-hq/synapse/issues/17543))
+
+### Internal Changes
+
+- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407))
+- Refactor sliding sync class into multiple files. ([\#17595](https://github.com/element-hq/synapse/issues/17595))
+- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599))
+- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600))
+- Add support to `@tag_args` for standalone functions. ([\#17604](https://github.com/element-hq/synapse/issues/17604))
+- Speed up incremental syncs in sliding sync by adding some more caching. ([\#17606](https://github.com/element-hq/synapse/issues/17606))
+- Always return the user's own read receipts in sliding sync. ([\#17617](https://github.com/element-hq/synapse/issues/17617))
+- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620))
+- Refactor sliding sync code to move room list logic out into a separate class. ([\#17622](https://github.com/element-hq/synapse/issues/17622))
+
+
+
+### Updates to locked dependencies
+
+* Bump attrs from 23.2.0 to 24.2.0. ([\#17609](https://github.com/element-hq/synapse/issues/17609))
+* Bump cryptography from 42.0.8 to 43.0.0. ([\#17584](https://github.com/element-hq/synapse/issues/17584))
+* Bump phonenumbers from 8.13.43 to 8.13.44. ([\#17610](https://github.com/element-hq/synapse/issues/17610))
+* Bump pygithub from 2.3.0 to 2.4.0. ([\#17612](https://github.com/element-hq/synapse/issues/17612))
+* Bump pyyaml from 6.0.1 to 6.0.2. ([\#17611](https://github.com/element-hq/synapse/issues/17611))
+* Bump sentry-sdk from 2.12.0 to 2.13.0. ([\#17585](https://github.com/element-hq/synapse/issues/17585))
+* Bump serde from 1.0.206 to 1.0.208. ([\#17581](https://github.com/element-hq/synapse/issues/17581))
+* Bump serde from 1.0.208 to 1.0.209. ([\#17613](https://github.com/element-hq/synapse/issues/17613))
+* Bump serde_json from 1.0.124 to 1.0.125. ([\#17582](https://github.com/element-hq/synapse/issues/17582))
+* Bump serde_json from 1.0.125 to 1.0.127. ([\#17614](https://github.com/element-hq/synapse/issues/17614))
+* Bump types-jsonschema from 4.23.0.20240712 to 4.23.0.20240813. ([\#17583](https://github.com/element-hq/synapse/issues/17583))
+* Bump types-setuptools from 71.1.0.20240726 to 71.1.0.20240818. ([\#17586](https://github.com/element-hq/synapse/issues/17586))
+
+# Synapse 1.114.0rc1 (2024-08-20)
+
+### Features
+
+- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. ([\#17571](https://github.com/element-hq/synapse/issues/17571))
+- Handle changes in `timeline_limit` in experimental sliding sync. ([\#17579](https://github.com/element-hq/synapse/issues/17579))
+- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592))
+
+### Bugfixes
+
+- Start handlers for new media endpoints when the media resource is configured. ([\#17483](https://github.com/element-hq/synapse/issues/17483))
+- Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17510](https://github.com/element-hq/synapse/issues/17510))
+- Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. ([\#17535](https://github.com/element-hq/synapse/issues/17535))
+- Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17538](https://github.com/element-hq/synapse/issues/17538))
+- Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. ([\#17545](https://github.com/element-hq/synapse/issues/17545))
+- Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. ([\#17568](https://github.com/element-hq/synapse/issues/17568))
+- Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint. ([\#17570](https://github.com/element-hq/synapse/issues/17570))
+
+### Improved Documentation
+
+- Clarify default behaviour of the
+  [`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites)
+  option. ([\#17515](https://github.com/element-hq/synapse/issues/17515))
+- Improve docstrings for profile methods. ([\#17559](https://github.com/element-hq/synapse/issues/17559))
+
+### Internal Changes
+
+- Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17514](https://github.com/element-hq/synapse/issues/17514))
+- Fix up a comment in the sliding sync implementation. ([\#17531](https://github.com/element-hq/synapse/issues/17531))
+- Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. ([\#17536](https://github.com/element-hq/synapse/issues/17536))
+- Fix performance of device lists in `/key/changes` and sliding sync. ([\#17537](https://github.com/element-hq/synapse/issues/17537), [\#17548](https://github.com/element-hq/synapse/issues/17548))
+- Bump setuptools from 67.6.0 to 72.1.0. ([\#17542](https://github.com/element-hq/synapse/issues/17542))
+- Add a utility function for generating random event IDs. ([\#17557](https://github.com/element-hq/synapse/issues/17557))
+- Speed up responding to media requests. ([\#17558](https://github.com/element-hq/synapse/issues/17558), [\#17561](https://github.com/element-hq/synapse/issues/17561), [\#17564](https://github.com/element-hq/synapse/issues/17564), [\#17566](https://github.com/element-hq/synapse/issues/17566), [\#17567](https://github.com/element-hq/synapse/issues/17567), [\#17569](https://github.com/element-hq/synapse/issues/17569))
+- Test the GitHub token before running release script steps. ([\#17562](https://github.com/element-hq/synapse/issues/17562))
+- Reduce log spam of multipart files. ([\#17563](https://github.com/element-hq/synapse/issues/17563))
+- Refactor per-connection state in experimental sliding sync handler. ([\#17574](https://github.com/element-hq/synapse/issues/17574))
+- Add histogram metrics for sliding sync processing time. ([\#17593](https://github.com/element-hq/synapse/issues/17593))
+
+
+
+### Updates to locked dependencies
+
+* Bump bytes from 1.6.1 to 1.7.1. ([\#17526](https://github.com/element-hq/synapse/issues/17526))
+* Bump lxml from 5.2.2 to 5.3.0. ([\#17550](https://github.com/element-hq/synapse/issues/17550))
+* Bump phonenumbers from 8.13.42 to 8.13.43. ([\#17551](https://github.com/element-hq/synapse/issues/17551))
+* Bump regex from 1.10.5 to 1.10.6. ([\#17527](https://github.com/element-hq/synapse/issues/17527))
+* Bump sentry-sdk from 2.10.0 to 2.12.0. ([\#17553](https://github.com/element-hq/synapse/issues/17553))
+* Bump serde from 1.0.204 to 1.0.206. ([\#17556](https://github.com/element-hq/synapse/issues/17556))
+* Bump serde_json from 1.0.122 to 1.0.124. ([\#17555](https://github.com/element-hq/synapse/issues/17555))
+* Bump sigstore/cosign-installer from 3.5.0 to 3.6.0. ([\#17549](https://github.com/element-hq/synapse/issues/17549))
+* Bump types-pyyaml from 6.0.12.20240311 to 6.0.12.20240808. ([\#17552](https://github.com/element-hq/synapse/issues/17552))
+* Bump types-requests from 2.31.0.20240406 to 2.32.0.20240712. ([\#17524](https://github.com/element-hq/synapse/issues/17524))
+
+# Synapse 1.113.0 (2024-08-13)
+
+No significant changes since 1.113.0rc1.
+
+
+
+
+# Synapse 1.113.0rc1 (2024-08-06)
+
+### Features
+
+- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447))
+- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477))
+- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489))
+- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505))
+
+### Bugfixes
+
+- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450))
+- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499))
+
+### Improved Documentation
+
+- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476))
+
+### Internal Changes
+
+- Change sliding sync to use its own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452))
+- Ensure we don't send down a negative `bump_stamp` in the experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478))
+- Do not send empty room entries down the experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479))
+- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482))
+- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501))
+- Split and move Sliding Sync tests so we have some more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504))
+- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507))
+- Make `timeline` assertions in Sliding Sync tests easier to understand. ([\#17511](https://github.com/element-hq/synapse/issues/17511))
+- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529))
+
+
+
+### Updates to locked dependencies
+
+* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495))
+* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522))
+* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521))
+* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494))
+* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493))
+* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525))
+* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523))
+* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496))
+* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497))
+
+# Synapse 1.112.0 (2024-07-30)
+
+This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
+
+Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.
+
+This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
+If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
+
+With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
+The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
+
+Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
+
+**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
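+
+For example, a pip-based upgrade that also picks up the patched Twisted release
+candidate might look like this:
+
+```sh
+# Upgrade Synapse as usual...
+pip install -U matrix-synapse
+# ...then install the patched Twisted release candidate explicitly, since pip
+# will not select a release candidate on its own.
+pip install Twisted==24.7.0rc1
+```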
+
+### Internal Changes
+
+- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
+
+
+# Synapse 1.112.0rc1 (2024-07-23)
+
+Please note that this release candidate does not include the security dependency update
+included in version 1.111.1 as this version was released before 1.111.1.
+The same security fix can be found in the full release of 1.112.0.
+
+### Features
+
+- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
+- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
+- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
+- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
+- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
+- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
+- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))
+
+### Bugfixes
+
+- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
+- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
+- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
+- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))
+
+### Improved Documentation
+
+- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
+- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
+- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))
+
+### Internal Changes
+
+- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
+- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
+- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
+- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
+- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
+- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
+- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
+- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
+- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
+- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
+- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))
+
+
+
+### Updates to locked dependencies
+
+* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
+* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
+* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
+* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
+* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
+* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
+* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
+* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
+* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
+* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
+* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
+* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
+* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
+* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))
+
+
+# Synapse 1.111.1 (2024-07-30)
+
+This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
+
+This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
+If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
+
+With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
+The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
+
+Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
+
+**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
+
+
+### Internal Changes
+
+- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
+
+
+# Synapse 1.111.0 (2024-07-16)
+
+No significant changes since 1.111.0rc2.
+
+
+
+
+# Synapse 1.111.0rc2 (2024-07-10)
+
+### Bugfixes
+
+- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420))
+
+### Improved Documentation
+
+- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421))
+
+### Internal Changes
+
+- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422))
+
+
+
+
+# Synapse 1.111.0rc1 (2024-07-09)
+
+### Features
+
+- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320))
+- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337))
+- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+  by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the
+  remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388))
+- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395))
+- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400))
+- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403))
+- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406))
+
+### Bugfixes
+
+- Fix a rare race which caused no new to-device messages to be received from a remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362))
+- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398))
+
+### Improved Documentation
+
+- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356))
+- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379))
+- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399))
+
+### Internal Changes
+
+- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318))
+- Fix uploading packages to PyPI. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
+- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367))
+- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411))
+- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390))
+- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410))
+- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393))
+
+
+
+### Updates to locked dependencies
+
+* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404))
+* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382))
+* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413))
+* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384))
+* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414))
+* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412))
+* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415))
+* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381))
+* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409))
+* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408))
+* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380))
+
 # Synapse 1.110.0 (2024-07-03)
 
 No significant changes since 1.110.0rc3.
diff --git a/Cargo.lock b/Cargo.lock
index 4353e55977..dfc8dc6047 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.86"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
 
 [[package]]
 name = "arc-swap"
@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytes"
-version = "1.6.0"
+version = "1.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
 
 [[package]]
 name = "cfg-if"
@@ -444,9 +444,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.10.5"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -456,9 +456,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.6"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -467,9 +467,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "ryu"
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "serde"
-version = "1.0.203"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.203"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,11 +505,12 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.119"
+version = "1.0.128"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0"
+checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
 dependencies = [
  "itoa",
+ "memchr",
  "ryu",
  "serde",
 ]
@@ -597,9 +598,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 
 [[package]]
 name = "ulid"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
 dependencies = [
  "getrandom",
  "rand",
diff --git a/README.rst b/README.rst
index a52e0c193d..2fe4a7e43f 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,4 @@
-.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+.. image:: ./docs/element_logo_white_bg.svg
    :height: 60px
 
 **Element Synapse - Matrix homeserver implementation**
@@ -158,7 +158,7 @@ it:
 
 We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
 the public internet. Without it, anyone can freely register accounts on your homeserver.
-This can be exploited by attackers to create spambots targetting the rest of the Matrix
+This can be exploited by attackers to create spambots targeting the rest of the Matrix
 federation.
 
 Your new user name will be formed partly from the ``server_name``, and partly
diff --git a/changelog.d/17318.misc b/changelog.d/17318.misc
deleted file mode 100644
index b0b21da23b..0000000000
--- a/changelog.d/17318.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make the release script create a release branch for Complement as well.
diff --git a/changelog.d/17320.feature b/changelog.d/17320.feature
deleted file mode 100644
index 1e524f3eca..0000000000
--- a/changelog.d/17320.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17337.feature b/changelog.d/17337.feature
deleted file mode 100644
index bc8f437dbe..0000000000
--- a/changelog.d/17337.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17342.feature b/changelog.d/17342.feature
deleted file mode 100644
index b2671ea14a..0000000000
--- a/changelog.d/17342.feature
+++ /dev/null
@@ -1 +0,0 @@
-Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/changelog.d/17356.doc b/changelog.d/17356.doc
deleted file mode 100644
index b393d8d147..0000000000
--- a/changelog.d/17356.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify `url_preview_url_blacklist` is a usability feature.
diff --git a/changelog.d/17362.bugfix b/changelog.d/17362.bugfix
deleted file mode 100644
index a91ce9fc06..0000000000
--- a/changelog.d/17362.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix rare race which causes no new to-device messages to be received from remote server.
diff --git a/changelog.d/17363.misc b/changelog.d/17363.misc
deleted file mode 100644
index 555e2225ba..0000000000
--- a/changelog.d/17363.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix uploading packages to PyPi.
\ No newline at end of file
diff --git a/changelog.d/17365.feature b/changelog.d/17365.feature
deleted file mode 100644
index 61acc32f32..0000000000
--- a/changelog.d/17365.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint.
\ No newline at end of file
diff --git a/changelog.d/17367.misc b/changelog.d/17367.misc
deleted file mode 100644
index 361731b8ae..0000000000
--- a/changelog.d/17367.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add CI check for the README.
\ No newline at end of file
diff --git a/changelog.d/17379.doc b/changelog.d/17379.doc
deleted file mode 100644
index 08c2544426..0000000000
--- a/changelog.d/17379.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix broken links in README.
diff --git a/changelog.d/17381.misc b/changelog.d/17381.misc
deleted file mode 100644
index ca9830c136..0000000000
--- a/changelog.d/17381.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix linting errors from new `ruff` version.
diff --git a/changelog.d/17390.misc b/changelog.d/17390.misc
deleted file mode 100644
index 6a4e344c5c..0000000000
--- a/changelog.d/17390.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix building debian packages on non-clean checkouts.
diff --git a/changelog.d/17392.misc b/changelog.d/17392.misc
deleted file mode 100644
index 76e3976e28..0000000000
--- a/changelog.d/17392.misc
+++ /dev/null
@@ -1 +0,0 @@
-Finish up work to allow per-user feature flags.
diff --git a/changelog.d/17393.misc b/changelog.d/17393.misc
deleted file mode 100644
index e131225276..0000000000
--- a/changelog.d/17393.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow enabling sliding sync per-user.
diff --git a/changelog.d/17399.doc b/changelog.d/17399.doc
deleted file mode 100644
index 7a3fcf24c0..0000000000
--- a/changelog.d/17399.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify that changelog content *and file extension* need to match in order for entries to merge.
diff --git a/changelog.d/17400.feature b/changelog.d/17400.feature
deleted file mode 100644
index 4dca90890c..0000000000
--- a/changelog.d/17400.feature
+++ /dev/null
@@ -1 +0,0 @@
-Forget all of a user's rooms upon deactivation, enabling future purges.
\ No newline at end of file
diff --git a/changelog.d/17627.doc b/changelog.d/17627.doc
new file mode 100644
index 0000000000..487a0aea0d
--- /dev/null
+++ b/changelog.d/17627.doc
@@ -0,0 +1 @@
+Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called.
\ No newline at end of file
diff --git a/changelog.d/17708.feature b/changelog.d/17708.feature
new file mode 100644
index 0000000000..90ec810f50
--- /dev/null
+++ b/changelog.d/17708.feature
@@ -0,0 +1 @@
+Add the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload.
\ No newline at end of file
diff --git a/changelog.d/17718.misc b/changelog.d/17718.misc
new file mode 100644
index 0000000000..ea73a03f53
--- /dev/null
+++ b/changelog.d/17718.misc
@@ -0,0 +1 @@
+Slight optimization when fetching state/events for Sliding Sync.
diff --git a/changelog.d/17736.bugfix b/changelog.d/17736.bugfix
new file mode 100644
index 0000000000..0d3fd06962
--- /dev/null
+++ b/changelog.d/17736.bugfix
@@ -0,0 +1 @@
+Fix saving of PNG thumbnails when the original image is in the CMYK color space.
diff --git a/changelog.d/17783.feature b/changelog.d/17783.feature
new file mode 100644
index 0000000000..ce8c216418
--- /dev/null
+++ b/changelog.d/17783.feature
@@ -0,0 +1 @@
+Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper.
diff --git a/changelog.d/17785.bugfix b/changelog.d/17785.bugfix
new file mode 100644
index 0000000000..df2898f54e
--- /dev/null
+++ b/changelog.d/17785.bugfix
@@ -0,0 +1 @@
+Fix bug with sliding sync where the server would not return state that was added to the `required_state` config.
diff --git a/changelog.d/17802.doc b/changelog.d/17802.doc
new file mode 100644
index 0000000000..72e653d3c4
--- /dev/null
+++ b/changelog.d/17802.doc
@@ -0,0 +1 @@
+Correct documentation to refer to the `--config-path` argument instead of `--config-file`.
diff --git a/changelog.d/17803.misc b/changelog.d/17803.misc
new file mode 100644
index 0000000000..a267df8b83
--- /dev/null
+++ b/changelog.d/17803.misc
@@ -0,0 +1 @@
+Test the GitHub token before running release script steps.
diff --git a/changelog.d/17805.bugfix b/changelog.d/17805.bugfix
new file mode 100644
index 0000000000..df2898f54e
--- /dev/null
+++ b/changelog.d/17805.bugfix
@@ -0,0 +1 @@
+Fix bug with sliding sync where the server would not return state that was added to the `required_state` config.
diff --git a/changelog.d/17824.misc b/changelog.d/17824.misc
new file mode 100644
index 0000000000..22574f00ec
--- /dev/null
+++ b/changelog.d/17824.misc
@@ -0,0 +1 @@
+Build debian packages for new Ubuntu versions, and stop building for no longer supported versions.
diff --git a/changelog.d/17825.doc b/changelog.d/17825.doc
new file mode 100644
index 0000000000..ee43667417
--- /dev/null
+++ b/changelog.d/17825.doc
@@ -0,0 +1 @@
+Fix typo in `target_cache_memory_usage` docs.
\ No newline at end of file
diff --git a/changelog.d/17826.misc b/changelog.d/17826.misc
new file mode 100644
index 0000000000..9148c96a0d
--- /dev/null
+++ b/changelog.d/17826.misc
@@ -0,0 +1 @@
+Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect.
\ No newline at end of file
diff --git a/changelog.d/17842.misc b/changelog.d/17842.misc
new file mode 100644
index 0000000000..78af706c31
--- /dev/null
+++ b/changelog.d/17842.misc
@@ -0,0 +1 @@
+Fix some typing issues uncovered by upgrading mypy to 1.11.x.
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index d4ddeb4dc7..ca2e72b5e8 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -21,7 +21,8 @@
 #
 #
 
-""" Starts a synapse client console. """
+"""Starts a synapse client console."""
+
 import argparse
 import binascii
 import cmd
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index 779590768f..1d74fee822 100644
--- a/contrib/graph/graph.py
+++ b/contrib/graph/graph.py
@@ -20,8 +20,8 @@
 #
 
 import argparse
-import cgi
 import datetime
+import html
 import json
 import urllib.request
 from typing import List
@@ -85,7 +85,7 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
             "name": name,
             "type": pdu.get("pdu_type"),
             "state_key": pdu.get("state_key"),
-            "content": cgi.escape(json.dumps(pdu.get("content")), quote=True),
+            "content": html.escape(json.dumps(pdu.get("content")), quote=True),
             "time": t,
             "depth": pdu.get("depth"),
         }
diff --git a/debian/changelog b/debian/changelog
index c285cc79eb..1995fbf6f6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,123 @@
+matrix-synapse-py3 (1.117.0) stable; urgency=medium
+
+  * New Synapse release 1.117.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Oct 2024 10:46:30 +0100
+
+matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.117.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Oct 2024 14:37:11 +0100
+
+matrix-synapse-py3 (1.116.0) stable; urgency=medium
+
+  * New Synapse release 1.116.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Oct 2024 11:14:07 +0100
+
+matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.116.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 26 Sep 2024 13:28:43 +0000
+
+matrix-synapse-py3 (1.116.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.116.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 25 Sep 2024 09:34:07 +0000
+
+matrix-synapse-py3 (1.115.0) stable; urgency=medium
+
+  * New Synapse release 1.115.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 17 Sep 2024 14:32:10 +0100
+
+matrix-synapse-py3 (1.115.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.115.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 12 Sep 2024 11:10:15 +0100
+
+matrix-synapse-py3 (1.115.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.115.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 10 Sep 2024 08:39:09 -0600
+
+matrix-synapse-py3 (1.114.0) stable; urgency=medium
+
+  * New Synapse release 1.114.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 02 Sep 2024 15:14:53 +0100
+
+matrix-synapse-py3 (1.114.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.114.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 30 Aug 2024 16:38:05 +0100
+
+matrix-synapse-py3 (1.114.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.114.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 30 Aug 2024 15:35:13 +0100
+
+matrix-synapse-py3 (1.114.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.114.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Aug 2024 12:55:28 +0000
+
+matrix-synapse-py3 (1.113.0) stable; urgency=medium
+
+  * New Synapse release 1.113.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Aug 2024 14:36:56 +0100
+
+matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.113.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Aug 2024 12:23:23 +0100
+
+matrix-synapse-py3 (1.112.0) stable; urgency=medium
+
+  * New Synapse release 1.112.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 17:15:48 +0100
+
+matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.112.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Jul 2024 08:58:55 -0600
+
+matrix-synapse-py3 (1.111.1) stable; urgency=medium
+
+  * New Synapse release 1.111.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 16:13:52 +0100
+
+matrix-synapse-py3 (1.111.0) stable; urgency=medium
+
+  * New Synapse release 1.111.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Jul 2024 12:42:46 +0200
+
+matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.111.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 10 Jul 2024 08:46:54 +0000
+
+matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.111.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Jul 2024 09:49:25 +0000
+
 matrix-synapse-py3 (1.110.0) stable; urgency=medium
 
   * New Synapse release 1.110.0.
diff --git a/debian/hash_password.1 b/debian/hash_password.1
index 39fa3ffcbf..af55e09c45 100644
--- a/debian/hash_password.1
+++ b/debian/hash_password.1
@@ -1,10 +1,13 @@
-.\" generated with Ronn-NG/v0.8.0
-.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
-.TH "HASH_PASSWORD" "1" "July 2021" "" ""
+.\" generated with Ronn-NG/v0.10.1
+.\" http://github.com/apjanke/ronn-ng/tree/0.10.1
+.TH "HASH_PASSWORD" "1" "August 2024" ""
 .SH "NAME"
 \fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
 .SH "SYNOPSIS"
-\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
+.TS
+allbox;
+\fBhash_password\fR [\fB\-p\fR	\fB\-\-password\fR [password]] [\fB\-c\fR	\fB\-\-config\fR \fIfile\fR]
+.TE
 .SH "DESCRIPTION"
 \fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
 .P
@@ -20,7 +23,7 @@ bcrypt_rounds: 17 password_config: pepper: "random hashing pepper"
 .SH "OPTIONS"
 .TP
 \fB\-p\fR, \fB\-\-password\fR
-Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
+Read the password from the command line if [password] is supplied, or from \fBSTDIN\fR\. If not, prompt the user and read the password from the tty prompt\. It is not recommended to type the password on the command line directly\. Use STDIN instead\.
 .TP
 \fB\-c\fR, \fB\-\-config\fR
 Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
@@ -33,7 +36,17 @@ $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
 .fi
 .IP "" 0
 .P
-Hash from the STDIN:
+Hash from stdin:
+.IP "" 4
+.nf
+$ cat password_file | hash_password
+Password:
+Confirm password:
+$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
+.fi
+.IP "" 0
+.P
+Hash from the prompt:
 .IP "" 4
 .nf
 $ hash_password
@@ -53,6 +66,6 @@ $2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
 .fi
 .IP "" 0
 .SH "COPYRIGHT"
-This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
+This man page was written by Rahul De «rahulde@swecha\.net» for the Debian GNU/Linux distribution\.
 .SH "SEE ALSO"
 synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
diff --git a/debian/hash_password.1.html b/debian/hash_password.1.html
new file mode 100644
index 0000000000..7a62787780
--- /dev/null
+++ b/debian/hash_password.1.html
@@ -0,0 +1,182 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <meta http-equiv='content-type' content='text/html;charset=utf-8'>
+  <meta name='generator' content='Ronn-NG/v0.10.1 (http://github.com/apjanke/ronn-ng/tree/0.10.1)'>
+  <title>hash_password(1) - Calculate the hash of a new password, so that passwords can be reset</title>
+  <style type='text/css' media='all'>
+  /* style: man */
+  body#manpage {margin:0}
+  .mp {max-width:100ex;padding:0 9ex 1ex 4ex}
+  .mp p,.mp pre,.mp ul,.mp ol,.mp dl {margin:0 0 20px 0}
+  .mp h2 {margin:10px 0 0 0}
+  .mp > p,.mp > pre,.mp > ul,.mp > ol,.mp > dl {margin-left:8ex}
+  .mp h3 {margin:0 0 0 4ex}
+  .mp dt {margin:0;clear:left}
+  .mp dt.flush {float:left;width:8ex}
+  .mp dd {margin:0 0 0 9ex}
+  .mp h1,.mp h2,.mp h3,.mp h4 {clear:left}
+  .mp pre {margin-bottom:20px}
+  .mp pre+h2,.mp pre+h3 {margin-top:22px}
+  .mp h2+pre,.mp h3+pre {margin-top:5px}
+  .mp img {display:block;margin:auto}
+  .mp h1.man-title {display:none}
+  .mp,.mp code,.mp pre,.mp tt,.mp kbd,.mp samp,.mp h3,.mp h4 {font-family:monospace;font-size:14px;line-height:1.42857142857143}
+  .mp h2 {font-size:16px;line-height:1.25}
+  .mp h1 {font-size:20px;line-height:2}
+  .mp {text-align:justify;background:#fff}
+  .mp,.mp code,.mp pre,.mp pre code,.mp tt,.mp kbd,.mp samp {color:#131211}
+  .mp h1,.mp h2,.mp h3,.mp h4 {color:#030201}
+  .mp u {text-decoration:underline}
+  .mp code,.mp strong,.mp b {font-weight:bold;color:#131211}
+  .mp em,.mp var {font-style:italic;color:#232221;text-decoration:none}
+  .mp a,.mp a:link,.mp a:hover,.mp a code,.mp a pre,.mp a tt,.mp a kbd,.mp a samp {color:#0000ff}
+  .mp b.man-ref {font-weight:normal;color:#434241}
+  .mp pre {padding:0 4ex}
+  .mp pre code {font-weight:normal;color:#434241}
+  .mp h2+pre,h3+pre {padding-left:0}
+  ol.man-decor,ol.man-decor li {margin:3px 0 10px 0;padding:0;float:left;width:33%;list-style-type:none;text-transform:uppercase;color:#999;letter-spacing:1px}
+  ol.man-decor {width:100%}
+  ol.man-decor li.tl {text-align:left}
+  ol.man-decor li.tc {text-align:center;letter-spacing:4px}
+  ol.man-decor li.tr {text-align:right;float:right}
+  </style>
+</head>
+<!--
+  The following styles are deprecated and will be removed at some point:
+  div#man, div#man ol.man, div#man ol.head, div#man ol.man.
+
+  The .man-page, .man-decor, .man-head, .man-foot, .man-title, and
+  .man-navigation should be used instead.
+-->
+<body id='manpage'>
+  <div class='mp' id='man'>
+
+  <div class='man-navigation' style='display:none'>
+    <a href="#NAME">NAME</a>
+    <a href="#SYNOPSIS">SYNOPSIS</a>
+    <a href="#DESCRIPTION">DESCRIPTION</a>
+    <a href="#FILES">FILES</a>
+    <a href="#OPTIONS">OPTIONS</a>
+    <a href="#EXAMPLES">EXAMPLES</a>
+    <a href="#COPYRIGHT">COPYRIGHT</a>
+    <a href="#SEE-ALSO">SEE ALSO</a>
+  </div>
+
+  <ol class='man-decor man-head man head'>
+    <li class='tl'>hash_password(1)</li>
+    <li class='tc'></li>
+    <li class='tr'>hash_password(1)</li>
+  </ol>
+
+  
+
+<h2 id="NAME">NAME</h2>
+<p class="man-name">
+  <code>hash_password</code> - <span class="man-whatis">Calculate the hash of a new password, so that passwords can be reset</span>
+</p>
+<h2 id="SYNOPSIS">SYNOPSIS</h2>
+
+<table>
+  <tbody>
+    <tr>
+      <td>
+<code>hash_password</code> [<code>-p</code>
+</td>
+      <td>
+<code>--password</code> [password]] [<code>-c</code>
+</td>
+      <td>
+<code>--config</code> <var>file</var>]</td>
+    </tr>
+  </tbody>
+</table>
+
+<h2 id="DESCRIPTION">DESCRIPTION</h2>
+
+<p><strong>hash_password</strong> calculates the hash of a supplied password using bcrypt.</p>
+
+<p><code>hash_password</code> takes a password as a parameter either on the command line
+or from <code>STDIN</code> if not supplied.</p>
+
+<p>It accepts a YAML file which can be used to specify parameters like the
+number of rounds for bcrypt and a password_config section containing the pepper
+value used for the hashing. By default <code>bcrypt_rounds</code> is set to <strong>12</strong>.</p>
+
+<p>The hashed password is written on the <code>STDOUT</code>.</p>
+
+<h2 id="FILES">FILES</h2>
+
+<p>A sample YAML file accepted by <code>hash_password</code> is described below:</p>
+
+<p>bcrypt_rounds: 17
+  password_config:
+    pepper: "random hashing pepper"</p>
+
+<h2 id="OPTIONS">OPTIONS</h2>
+
+<dl>
+<dt>
+<code>-p</code>, <code>--password</code>
+</dt>
+<dd>Read the password from the command line if [password] is supplied, or from <code>STDIN</code>.
+If not, prompt the user and read the password from the tty prompt.
+It is not recommended to type the password on the command line
+directly. Use STDIN instead.</dd>
+<dt>
+<code>-c</code>, <code>--config</code>
+</dt>
+<dd>Read the supplied YAML <var>file</var> containing the options <code>bcrypt_rounds</code>
+and the <code>password_config</code> section containing the <code>pepper</code> value.</dd>
+</dl>
+
+<h2 id="EXAMPLES">EXAMPLES</h2>
+
+<p>Hash from the command line:</p>
+
+<pre><code>$ hash_password -p "p@ssw0rd"
+$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe
+</code></pre>
+
+<p>Hash from stdin:</p>
+
+<pre><code>$ cat password_file | hash_password
+Password:
+Confirm password:
+$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
+</code></pre>
+
+<p>Hash from the prompt:</p>
+
+<pre><code>$ hash_password
+Password:
+Confirm password:
+$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
+</code></pre>
+
+<p>Using a config file:</p>
+
+<pre><code>$ hash_password -c config.yml
+Password:
+Confirm password:
+$2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O
+</code></pre>
+
+<h2 id="COPYRIGHT">COPYRIGHT</h2>
+
+<p>This man page was written by Rahul De «rahulde@swecha.net»
+for the Debian GNU/Linux distribution.</p>
+
+<h2 id="SEE-ALSO">SEE ALSO</h2>
+
+<p><span class="man-ref">synctl<span class="s">(1)</span></span>, <span class="man-ref">synapse_port_db<span class="s">(1)</span></span>, <span class="man-ref">register_new_matrix_user<span class="s">(1)</span></span>, <span class="man-ref">synapse_review_recent_signups<span class="s">(1)</span></span></p>
+
+  <ol class='man-decor man-foot man foot'>
+    <li class='tl'></li>
+    <li class='tc'>August 2024</li>
+    <li class='tr'>hash_password(1)</li>
+  </ol>
+
+  </div>
+</body>
+</html>
diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn
index 5d0df53802..b68d4a210e 100644
--- a/debian/hash_password.ronn
+++ b/debian/hash_password.ronn
@@ -29,8 +29,8 @@ A sample YAML file accepted by `hash_password` is described below:
 ## OPTIONS
 
   * `-p`, `--password`:
-    Read the password form the command line if [password] is supplied.
-    If not, prompt the user and read the password form the `STDIN`.
+    Read the password from the command line if [password] is supplied, or from `STDIN`.
+    If not, prompt the user and read the password from the tty prompt.
     It is not recommended to type the password on the command line
     directly. Use the STDIN instead.
 
@@ -45,7 +45,14 @@ Hash from the command line:
     $ hash_password -p "p@ssw0rd"
     $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe
 
-Hash from the STDIN:
+Hash from stdin:
+
+    $ cat password_file | hash_password
+    Password:
+    Confirm password:
+    $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
+
+Hash from the prompt:
 
     $ hash_password
     Password:
diff --git a/debian/templates b/debian/templates
index cab05715d0..7bfd3c2e9f 100644
--- a/debian/templates
+++ b/debian/templates
@@ -5,7 +5,7 @@ _Description: Name of the server:
  servers via federation. This is normally the public hostname of the
  server running synapse, but can be different if you set up delegation.
  Please refer to the delegation documentation in this case:
- https://github.com/element-hq/synapse/blob/master/docs/delegate.md.
+ https://element-hq.github.io/synapse/latest/delegate.html.
 
 Template: matrix-synapse/report-stats
 Type: boolean
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1bef8045ca..1da196b12e 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
 ###
 # We hardcode the use of Debian bookworm here because this could change upstream
 # and other Dockerfiles used for testing are expecting bookworm.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements
 
 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder
 
 # install the OS build deps
 RUN \
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index f000144567..9266f134be 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -24,7 +24,7 @@ ARG distro=""
 # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
 # it's not obviously easier to use that than to build our own.)
 
-FROM docker.io/library/${distro} as builder
+FROM docker.io/library/${distro} AS builder
 
 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 6588b3ce14..b9334cc53b 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -111,6 +111,9 @@ server_notices:
   system_mxid_avatar_url: ""
   room_name: "Server Alert"
 
+# Enable delayed events (msc4140)
+max_event_delay_duration: 24h
+
 
 # Disable sync cache so that initial `/sync` requests are up-to-date.
 caches:
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index b6690f3404..15d8d7b558 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -126,6 +126,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
             "^/_matrix/client/v1/media/.*$",
+            "^/_matrix/federation/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 2281385830..cb38e26005 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -1361,3 +1361,83 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 ```
 
 _Added in Synapse 1.72.0._
+
+
+## Redact all the events of a user
+
+The API is:
+```
+POST /_synapse/admin/v1/user/$user_id/redact
+
+{
+  "rooms": ["!roomid1", "!roomid2"]
+}
+```
+If an empty list is provided for `rooms`, all events in all rooms the user is a member of will be redacted;
+otherwise, all the events in the rooms provided in the request will be redacted.
+
+The API starts the redaction process in the background and returns immediately with a JSON body
+containing a redact id, which can be used to query the status of the redaction process:
+
+```json
+{
+    "redact_id": "<opaque id>"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID of the user: for example, `@user:server.com`.
+
+The following JSON body parameter must be provided:
+
+- `rooms` - A list of rooms to redact the user's events in. If an empty list is provided, all events in all rooms
+  the user is a member of will be redacted.
+
+_Added in Synapse 1.116.0._
+
+The following JSON body parameters are optional:
+
+- `reason` - Reason the redaction is being requested, e.g. "spam" or "abuse". This will be included in each redaction event and be visible to users.
+- `limit` - a limit on the number of the user's events to search for redactable events in each room (events are redacted newest to oldest). Defaults to 1000 if not provided.
+
+
+## Check the status of a redaction process
+
+It is possible to query the status of the background task for redacting a user's events.
+The status can be queried up to 24 hours after completion of the task,
+or until Synapse is restarted (whichever happens first).
+
+The API is:
+
+```
+GET /_synapse/admin/v1/user/redact_status/$redact_id
+```
+
+A response body like the following is returned:
+
+```json
+{
+  "status": "active",
+  "failed_redactions": []
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+* `redact_id` - string - The ID for this redaction process, provided when the redaction was requested.
+
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `status` - string - one of "scheduled", "active", "completed" or "failed", indicating the status of the redaction job
+- `failed_redactions` - dictionary - the keys of the dict are event IDs the process was unable to redact, if any, and the values are
+  the corresponding errors that caused those redactions to fail
+
+_Added in Synapse 1.116.0._
\ No newline at end of file
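+
+Below is a hedged sketch of driving these two endpoints from Python with `requests`;
+the homeserver URL, admin access token, user ID and polling interval are placeholders,
+not values shipped with Synapse.
+
+```python
+import time
+
+import requests
+
+BASE = "https://homeserver.example.com"
+HEADERS = {"Authorization": "Bearer <admin_access_token>"}
+
+# Start redacting all of the user's events in every room they are a member of.
+resp = requests.post(
+    f"{BASE}/_synapse/admin/v1/user/@spammer:example.com/redact",
+    headers=HEADERS,
+    json={"rooms": [], "reason": "spam", "limit": 1000},
+)
+redact_id = resp.json()["redact_id"]
+
+# Poll the status endpoint until the background task finishes.
+while True:
+    status = requests.get(
+        f"{BASE}/_synapse/admin/v1/user/redact_status/{redact_id}",
+        headers=HEADERS,
+    ).json()
+    if status["status"] in ("completed", "failed"):
+        print(status["status"], status.get("failed_redactions"))
+        break
+    time.sleep(5)
+```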
diff --git a/docs/code_style.md b/docs/code_style.md
index 026001b8a3..c28aaadad0 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -8,9 +8,7 @@ errors in code.
 
 The necessary tools are:
 
-- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
-- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
-- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
+- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors and enforce a consistent style; and
 - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
 
 See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md
index 76709487f8..35b667831c 100644
--- a/docs/development/room-dag-concepts.md
+++ b/docs/development/room-dag-concepts.md
@@ -21,8 +21,10 @@ incrementing integer, but backfilled events start with `stream_ordering=-1` and
 
 ---
 
- - `/sync` returns things in the order they arrive at the server (`stream_ordering`).
- - `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`.
+ - Incremental `/sync?since=xxx` returns things in the order they arrive at the server
+   (`stream_ordering`).
+ - Initial `/sync`, `/messages` (and `/backfill` in the federation API) return them in
+   the order determined by the event graph `(topological_ordering, stream_ordering)`.
 
 The general idea is that, if you're following a room in real-time (i.e.
 `/sync`), you probably want to see the messages as they arrive at your server,
diff --git a/docs/element_logo_white_bg.svg b/docs/element_logo_white_bg.svg
new file mode 100644
index 0000000000..50195ab1c8
--- /dev/null
+++ b/docs/element_logo_white_bg.svg
@@ -0,0 +1,94 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   width="41.440346mm"
+   height="10.383124mm"
+   viewBox="0 0 41.440346 10.383125"
+   version="1.1"
+   id="svg1"
+   xml:space="preserve"
+   sodipodi:docname="element_logo_white_bg.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
+     id="namedview1"
+     pagecolor="#ffffff"
+     bordercolor="#000000"
+     borderopacity="0.25"
+     inkscape:showpageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:deskcolor="#d1d1d1"
+     inkscape:document-units="mm"
+     showgrid="false"
+     inkscape:export-bgcolor="#ffffffff" /><defs
+     id="defs1" /><g
+     id="layer1"
+     transform="translate(-84.803844,-143.2075)"
+     inkscape:export-filename="element_logo_white_bg.svg"
+     inkscape:export-xdpi="96"
+     inkscape:export-ydpi="96"><g
+       style="fill:none"
+       id="g1"
+       transform="matrix(0.26458333,0,0,0.26458333,85.841658,144.26667)"><rect
+         style="display:inline;fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:1.31041;stroke-dasharray:none;stroke-opacity:1"
+         id="rect20"
+         width="155.31451"
+         height="37.932892"
+         x="-3.2672384"
+         y="-3.3479743"
+         rx="3.3718522"
+         ry="3.7915266"
+         transform="translate(-2.1259843e-6)"
+         inkscape:label="rect20"
+         inkscape:export-filename="rect20.svg"
+         inkscape:export-xdpi="96"
+         inkscape:export-ydpi="96" /><path
+         fill-rule="evenodd"
+         clip-rule="evenodd"
+         d="M 16,32 C 24.8366,32 32,24.8366 32,16 32,7.16344 24.8366,0 16,0 7.16344,0 0,7.16344 0,16 0,24.8366 7.16344,32 16,32 Z"
+         fill="#0dbd8b"
+         id="path1" /><path
+         fill-rule="evenodd"
+         clip-rule="evenodd"
+         d="m 13.0756,7.455 c 0,-0.64584 0.5247,-1.1694 1.1719,-1.1694 4.3864,0 7.9423,3.54853 7.9423,7.9259 0,0.6458 -0.5246,1.1694 -1.1718,1.1694 -0.6472,0 -1.1719,-0.5236 -1.1719,-1.1694 0,-3.0857 -2.5066,-5.58711 -5.5986,-5.58711 -0.6472,0 -1.1719,-0.52355 -1.1719,-1.16939 z"
+         fill="#ffffff"
+         id="path2" /><path
+         fill-rule="evenodd"
+         clip-rule="evenodd"
+         d="m 24.5424,13.042 c 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,4.3773 -3.5559,7.9258 -7.9424,7.9258 -0.6472,0 -1.1718,-0.5235 -1.1718,-1.1693 0,-0.6459 0.5246,-1.1694 1.1718,-1.1694 3.0921,0 5.5987,-2.5015 5.5987,-5.5871 0,-0.6459 0.5247,-1.1694 1.1718,-1.1694 z"
+         fill="#ffffff"
+         id="path3" /><path
+         fill-rule="evenodd"
+         clip-rule="evenodd"
+         d="m 18.9446,24.5446 c 0,0.6459 -0.5247,1.1694 -1.1718,1.1694 -4.3865,0 -7.94239,-3.5485 -7.94239,-7.9258 0,-0.6459 0.52469,-1.1694 1.17179,-1.1694 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,3.0856 2.5066,5.587 5.5987,5.587 0.6471,0 1.1718,0.5236 1.1718,1.1694 z"
+         fill="#ffffff"
+         id="path4" /><path
+         fill-rule="evenodd"
+         clip-rule="evenodd"
+         d="m 7.45823,18.9576 c -0.64718,0 -1.17183,-0.5235 -1.17183,-1.1694 0,-4.3773 3.55591,-7.92581 7.9423,-7.92581 0.6472,0 1.1719,0.52351 1.1719,1.16941 0,0.6458 -0.5247,1.1694 -1.1719,1.1694 -3.092,0 -5.59864,2.5014 -5.59864,5.587 0,0.6459 -0.52465,1.1694 -1.17183,1.1694 z"
+         fill="#ffffff"
+         id="path5" /><path
+         d="M 56.2856,18.1428 H 44.9998 c 0.1334,1.181 0.5619,2.1238 1.2858,2.8286 0.7238,0.6857 1.6761,1.0286 2.8571,1.0286 0.7809,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3428,-1.5428 h 3.4286 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1714,0 3.9238,0.73333 5.2572,2.20003 1.3523,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0667,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6858,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
+         fill="#000000"
+         id="path6" /><path
+         d="M 58.6539,20.1428 V 3.14282 h 3.4 V 20.2 c 0,0.7619 0.419,1.1428 1.2571,1.1428 l 0.6,-0.0285 v 3.2285 c -0.3238,0.0572 -0.6667,0.0857 -1.0286,0.0857 -1.4666,0 -2.5428,-0.3714 -3.2285,-1.1142 -0.6667,-0.7429 -1,-1.8667 -1,-3.3715 z"
+         fill="#000000"
+         id="path7" /><path
+         d="M 79.7454,18.1428 H 68.4597 c 0.1333,1.181 0.5619,2.1238 1.2857,2.8286 0.7238,0.6857 1.6762,1.0286 2.8571,1.0286 0.781,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3429,-1.5428 h 3.4285 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1715,0 3.9238,0.73333 5.2572,2.20003 1.3524,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0666,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6857,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
+         fill="#000000"
+         id="path8" /><path
+         d="m 95.0851,16.0571 v 8.5143 h -3.4 v -8.8857 c 0,-2.2476 -0.9333,-3.3714 -2.8,-3.3714 -1.0095,0 -1.819,0.3238 -2.4286,0.9714 -0.5904,0.6476 -0.8857,1.5333 -0.8857,2.6571 v 8.6286 h -3.4 V 9.74282 h 3.1429 v 1.97148 c 0.3619,-0.6667 0.9143,-1.2191 1.6571,-1.6572 0.7429,-0.43809 1.6667,-0.65713 2.7714,-0.65713 2.0572,0 3.5429,0.78093 4.4572,2.34283 1.2571,-1.5619 2.9333,-2.34283 5.0286,-2.34283 1.733,0 3.067,0.54285 4,1.62853 0.933,1.0667 1.4,2.4762 1.4,4.2286 v 9.3143 h -3.4 v -8.8857 c 0,-2.2476 -0.933,-3.3714 -2.8,-3.3714 -1.0286,0 -1.8477,0.3333 -2.4572,1 -0.5905,0.6476 -0.8857,1.5619 -0.8857,2.7428 z"
+         fill="#000000"
+         id="path9" /><path
+         d="m 121.537,18.1428 h -11.286 c 0.133,1.181 0.562,2.1238 1.286,2.8286 0.723,0.6857 1.676,1.0286 2.857,1.0286 0.781,0 1.486,-0.1905 2.114,-0.5715 0.629,-0.3809 1.076,-0.8952 1.343,-1.5428 h 3.429 c -0.458,1.5047 -1.315,2.7238 -2.572,3.6571 -1.238,0.9143 -2.705,1.3715 -4.4,1.3715 -2.209,0 -4,-0.7334 -5.371,-2.2 -1.353,-1.4667 -2.029,-3.3239 -2.029,-5.5715 0,-2.1905 0.686,-4.0285 2.057,-5.5143 1.372,-1.4857 3.143,-2.22853 5.315,-2.22853 2.171,0 3.923,0.73333 5.257,2.20003 1.352,1.4476 2.028,3.2762 2.028,5.4857 z m -7.257,-5.9714 c -1.067,0 -1.953,0.3143 -2.658,0.9429 -0.704,0.6285 -1.142,1.4666 -1.314,2.5142 h 7.886 c -0.153,-1.0476 -0.572,-1.8857 -1.257,-2.5142 -0.686,-0.6286 -1.572,-0.9429 -2.657,-0.9429 z"
+         fill="#000000"
+         id="path10" /><path
+         d="m 127.105,9.74282 v 1.97148 c 0.343,-0.6477 0.905,-1.1905 1.686,-1.6286 0.8,-0.45716 1.762,-0.68573 2.885,-0.68573 1.753,0 3.105,0.53333 4.058,1.60003 0.971,1.0666 1.457,2.4857 1.457,4.2571 v 9.3143 h -3.4 v -8.8857 c 0,-1.0476 -0.248,-1.8667 -0.743,-2.4572 -0.476,-0.6095 -1.21,-0.9142 -2.2,-0.9142 -1.086,0 -1.943,0.3238 -2.572,0.9714 -0.609,0.6476 -0.914,1.5428 -0.914,2.6857 v 8.6 h -3.4 V 9.74282 Z"
+         fill="#000000"
+         id="path11" /><path
+         d="m 147.12,21.5428 v 2.9429 c -0.419,0.1143 -1.009,0.1714 -1.771,0.1714 -2.895,0 -4.343,-1.4571 -4.343,-4.3714 v -7.8286 h -2.257 V 9.74282 h 2.257 V 5.88568 h 3.4 v 3.85714 h 2.772 v 2.71428 h -2.772 v 7.4857 c 0,1.1619 0.552,1.7429 1.657,1.7429 z"
+         fill="#000000"
+         id="path12" /></g></g></svg>
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index ffdfe6082e..ec306d81ab 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -76,8 +76,9 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
 async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
 ```
 
-Called when processing an invitation. Both inviter and invitee are
-represented by their Matrix user ID (e.g. `@alice:example.com`).
+Called when processing an invitation, both when one is created locally and when
+one is received over federation. Both inviter and invitee are represented by
+their Matrix user ID (e.g. `@alice:example.com`).
 
 
 The callback must return one of:
@@ -112,7 +113,9 @@ async def user_may_send_3pid_invite(
 ```
 
 Called when processing an invitation using a third-party identifier (also called a 3PID,
-e.g. an email address or a phone number). 
+e.g. an email address or a phone number). It is only called when a 3PID invite is created
+locally - not when one is received in a room over federation. If the 3PID is already associated
+with a Matrix ID, the spam check will go through the `user_may_invite` callback instead.
 
 The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
 invitee is represented by its medium (e.g. "email") and its address
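+
+As a rough illustration of when these callbacks run, here is a hedged sketch of a
+module registering a `user_may_invite` callback; the class name, config shape and
+blocklist logic are illustrative and not part of Synapse itself.
+
+```python
+from typing import Union
+
+import synapse.module_api
+from synapse.module_api import NOT_SPAM, ModuleApi
+from synapse.module_api.errors import Codes
+
+
+class InviteBlocker:
+    def __init__(self, config: dict, api: ModuleApi):
+        # Hypothetical module config: a list of homeserver names to block.
+        self._blocked_servers = set(config.get("blocked_servers", []))
+        api.register_spam_checker_callbacks(user_may_invite=self.user_may_invite)
+
+    async def user_may_invite(
+        self, inviter: str, invitee: str, room_id: str
+    ) -> Union["synapse.module_api.NOT_SPAM", Codes]:
+        # Runs both for invites created locally and for invites received over
+        # federation, as described above.
+        if inviter.split(":", 1)[1] in self._blocked_servers:
+            return Codes.FORBIDDEN
+        return NOT_SPAM
+```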
diff --git a/docs/setup/forward_proxy.md b/docs/setup/forward_proxy.md
index 3482691f83..f02c7b5fc5 100644
--- a/docs/setup/forward_proxy.md
+++ b/docs/setup/forward_proxy.md
@@ -67,7 +67,7 @@ in Synapse can be deactivated.
 **NOTE**: This has an impact on security and is for testing purposes only!
 
 To deactivate the certificate validation, the following setting must be added to
-your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).
+your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).
 
 ```yaml
 use_insecure_ssl_client_just_for_testing_do_not_use: true
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index ed3e59c470..9cebb89b4d 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -52,8 +52,6 @@ architecture via <https://packages.matrix.org/debian/>.
 
 To install the latest release:
 
-TODO UPDATE ALL THIS
-
 ```sh
 sudo apt install -y lsb-release wget apt-transport-https
 sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
@@ -309,7 +307,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                  libwebp-devel libxml2-devel libxslt-devel libpq-devel \
                  python3-virtualenv libffi-devel openssl-devel python3-devel \
                  libicu-devel
-sudo dnf groupinstall "Development Tools"
+sudo dnf group install "Development Tools"
+```
+
+##### Red Hat Enterprise Linux / Rocky Linux
+
+*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*
+
+It's recommended to use the latest Python versions.
+
+RHEL 8 in particular ships with Python 3.6 by default, which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9, which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and are available in the official distributions' repositories, so it's recommended to use them.
+
+Python 3.11 and 3.12 are available for both RHEL 8 and 9.
+
+These commands should be run as the root user.
+
+RHEL 8
+```bash
+# Enable PowerTools repository
+dnf config-manager --set-enabled powertools
+```
+RHEL 9
+```bash
+# Enable CodeReady Linux Builder repository
+crb enable
+```
+
+Install a new version of Python. You only need one of these:
+```bash
+# Python 3.11
+dnf install python3.11 python3.11-devel
+```
+```bash
+# Python 3.12
+dnf install python3.12 python3.12-devel
+```
+Finally, install the common prerequisites:
+```bash
+dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
+dnf group install "Development Tools"
+```
+###### Using venv module instead of virtualenv command
+
+It's recommended to use the Python venv module directly rather than the virtualenv command.
+* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
+* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.
+
+Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI.
+
+```bash
+mkdir -p ~/synapse
+# To use Python 3.11, simply use the command "python3.11" instead.
+python3.12 -m venv ~/synapse/env
+source ~/synapse/env/bin/activate
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install matrix-synapse
 ```
 
 ##### macOS
diff --git a/docs/upgrade.md b/docs/upgrade.md
index cf53f56b06..52b1adbe90 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -119,13 +119,14 @@ stacking them up. You can monitor the currently running background updates with
 
 # Upgrading to v1.111.0
 
-## New worker endpoints for authenticated client media
+## New worker endpoints for authenticated client and federation media
 
 [Media repository workers](./workers.md#synapseappmedia_repository) handling
-Media APIs can now handle the following endpoint pattern:
+Media APIs can now handle the following endpoint patterns:
 
 ```
 ^/_matrix/client/v1/media/.*$
+^/_matrix/federation/v1/media/.*$
 ```
 
 Please update your reverse proxy configuration.
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index a1184d0375..0dce3d3e37 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -255,6 +255,8 @@ line to `/etc/default/matrix-synapse`:
 
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
 
+*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
+
 This made a significant difference on Python 2.7 - it's unclear how
 much of an improvement it provides on Python 3.x.
 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 65b03ad0f8..47e3ef1287 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -246,6 +246,7 @@ Example configuration:
 ```yaml
 presence:
   enabled: false
+  include_offline_users_on_sync: false
 ```
 
 `enabled` can also be set to a special value of "untracked" which ignores updates
@@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the
 
 *The "untracked" option was added in Synapse 1.96.0.*
 
+When clients perform an initial or `full_state` sync, presence results for offline users are
+not included by default. Setting `include_offline_users_on_sync` to `true` will always include
+offline users in the results. Defaults to false.
+
 ---
 ### `require_auth_for_profile_requests`
 
@@ -504,7 +509,8 @@ Unix socket support (_Added in Synapse 1.89.0_):
 
 Valid resource names are:
 
-* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
+* `client`: the client-server API (/_matrix/client). Also implies `media` and `static`.
+  If configuring the main process, the Synapse Admin API (/_synapse/admin) is also implied.
 
 * `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more.
 
@@ -755,6 +761,19 @@ email:
     password_reset: "[%(server_name)s] Password reset"
     email_validation: "[%(server_name)s] Validate your email"
 ```
+---
+### `max_event_delay_duration`
+
+The maximum allowed duration by which sent events can be delayed, as per
+[MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140).
+Must be a positive value if set.
+
+Defaults to no duration (`null`), which disallows sending delayed events.
+
+Example configuration:
+```yaml
+max_event_delay_duration: 24h
+```
 
 ## Homeserver blocking
 Useful options for Synapse admins.
@@ -1415,7 +1434,7 @@ number of entries that can be stored.
    Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry
    durations.
      * `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
-        They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
+        They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
         the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option.
      * `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value
         for this option.
@@ -1760,7 +1779,7 @@ rc_3pid_validation:
 
 This option sets ratelimiting how often invites can be sent in a room or to a
 specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer` 
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
 defaults to `per_second: 0.3`, `burst_count: 10`.
 
 Client requests that invite user(s) when [creating a
@@ -1863,6 +1882,18 @@ federation_rr_transactions_per_room_per_second: 40
 ## Media Store
 Config options related to Synapse's media store.
 
+---
+### `enable_authenticated_media`
+
+When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
+unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `/_matrix/client/v1/media/download` and `/_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note that if the setting is switched back to false
+after being enabled, media marked as authenticated will again be available over the legacy endpoints. Defaults to false, but
+this will change to true in a future Synapse release.
+
+Example configuration:
+```yaml
+enable_authenticated_media: true
+```
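+
+For illustration, a hedged sketch of fetching a piece of authenticated media from the
+new endpoint; the homeserver URL, server name, media ID and access token below are
+placeholders.
+
+```python
+import requests
+
+# Authenticated media requires the client's access token, unlike the legacy
+# /_matrix/media/... endpoints.
+resp = requests.get(
+    "https://homeserver.example.com/_matrix/client/v1/media/download/example.com/abcdefgh",
+    headers={"Authorization": "Bearer <access_token>"},
+    timeout=30,
+)
+resp.raise_for_status()
+with open("downloaded_media", "wb") as f:
+    f.write(resp.content)
+```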
 ---
 ### `enable_media_repo`
 
@@ -1949,7 +1980,7 @@ max_image_pixels: 35M
 ---
 ### `remote_media_download_burst_count`
 
-Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains.  Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.  
+Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads is penalized - if the bucket is full, i.e. a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.
 
 Example configuration:
 ```yaml
@@ -2297,6 +2328,22 @@ Example configuration:
 ```yaml
 turn_shared_secret: "YOUR_SHARED_SECRET"
 ```
+---
+### `turn_shared_secret_path`
+
+An alternative to [`turn_shared_secret`](#turn_shared_secret):
+allows the shared secret to be specified in an external file.
+
+The file should be a plain text file, containing only the shared secret.
+Synapse reads the shared secret from the given file once at startup.
+
+Example configuration:
+```yaml
+turn_shared_secret_path: /path/to/secrets/file
+```
+
+_Added in Synapse 1.116.0._
+
 ---
 ### `turn_username` and `turn_password`
 
@@ -2369,7 +2416,7 @@ enable_registration_without_verification: true
 ---
 ### `registrations_require_3pid`
 
-If this is set, users must provide all of the specified types of 3PID when registering an account.
+If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account.
 
 Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
 
@@ -2394,6 +2441,9 @@ disable_msisdn_registration: true
 
 Mandate that users are only allowed to associate certain formats of
 3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
+`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re).
+
+More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types).
 
 Example configuration:
 ```yaml
@@ -2403,7 +2453,7 @@ allowed_local_3pids:
   - medium: email
     pattern: '^[^@]+@vector\.im$'
   - medium: msisdn
-    pattern: '\+44'
+    pattern: '^44\d{10}$'
 ```
 ---
 ### `enable_3pid_lookup`
@@ -3282,8 +3332,8 @@ saml2_config:
     contact_person:
       - given_name: Bob
         sur_name: "the Sysadmin"
-        email_address": ["admin@example.com"]
-        contact_type": technical
+        email_address: ["admin@example.com"]
+        contact_type: technical
 
   saml_session_lifetime: 5m
 
@@ -3672,6 +3722,8 @@ Additional sub-options for this setting include:
    Required if `enabled` is set to true.
 * `subject_claim`: Name of the claim containing a unique identifier for the user.
    Optional, defaults to `sub`.
+* `display_name_claim`: Name of the claim containing the display name for the user. Optional.
+   If provided, the display name will be set to the value of this claim upon first login.
 * `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the
    "iss" claim will be required and validated for all JSON web tokens.
 * `audiences`: A list of audiences to validate the "aud" claim against. Optional.
@@ -3686,6 +3738,7 @@ jwt_config:
     secret: "provided-by-your-issuer"
     algorithm: "provided-by-your-issuer"
     subject_claim: "name_of_claim"
+    display_name_claim: "name_of_claim"
     issuer: "provided-by-your-issuer"
     audiences:
         - "provided-by-your-issuer"
@@ -4134,6 +4187,38 @@ default_power_level_content_override:
    trusted_private_chat: null
    public_chat: null
 ```
+
+The default power levels for each preset are:
+```yaml
+"m.room.name": 50
+"m.room.power_levels": 100
+"m.room.history_visibility": 100
+"m.room.canonical_alias": 50
+"m.room.avatar": 50
+"m.room.tombstone": 100
+"m.room.server_acl": 100
+"m.room.encryption": 100
+```
+
+So a complete example where the default power-levels for a preset are maintained
+but the power level for a new key is set is:
+```yaml
+default_power_level_content_override:
+   private_chat:
+    events:
+      "com.example.foo": 0
+      "m.room.name": 50
+      "m.room.power_levels": 100
+      "m.room.history_visibility": 100
+      "m.room.canonical_alias": 50
+      "m.room.avatar": 50
+      "m.room.tombstone": 100
+      "m.room.server_acl": 100
+      "m.room.encryption": 100
+   trusted_private_chat: null
+   public_chat: null
+```
+
 ---
 ### `forget_rooms_on_leave`
 
@@ -4286,7 +4371,13 @@ It is possible to scale the processes that handle sending outbound federation re
 by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
 a `federation_sender_instances` map. Doing so will remove handling of this function from
 the main process. Multiple workers can be added to this map, in which case the work is
-balanced across them.
+balanced across them.
+
+Load balancing works by assigning each outbound federation request to a federation
+sender worker based on the hash of the destination server name. This means that all
+requests being sent to the same destination will be processed by the same worker
+instance. Multiple `federation_sender_instances` are useful when federating with a
+large number of servers.
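+
+A hedged sketch of the idea (the exact hash Synapse uses may differ; the instance names
+below are placeholders):
+
+```python
+from hashlib import sha256
+
+FEDERATION_SENDER_INSTANCES = ["sender1", "sender2", "sender3"]
+
+def instance_for_destination(destination: str) -> str:
+    # A stable mapping from destination server name to one configured sender,
+    # so all traffic to that destination is handled by the same worker.
+    digest = int(sha256(destination.encode("utf-8")).hexdigest(), 16)
+    return FEDERATION_SENDER_INSTANCES[digest % len(FEDERATION_SENDER_INSTANCES)]
+
+print(instance_for_destination("matrix.org"))   # always the same instance
+print(instance_for_destination("example.com"))  # may be a different instance
+```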
 
 This configuration setting must be shared between all workers handling federation
 sending, and if changed all federation sender workers must be stopped at the same time
@@ -4436,6 +4527,9 @@ This setting has the following sub-options:
 * `path`: The full path to a local Unix socket file. **If this is used, `host` and
  `port` are ignored.** Defaults to `/tmp/redis.sock'
 * `password`: Optional password if configured on the Redis instance.
+* `password_path`: Alternative to `password`, reading the password from an
+   external file. The file should be a plain text file, containing only the
+   password. Synapse reads the password from the given file once at startup.
 * `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
 * `use_tls`: Whether to use tls connection. Defaults to false.
 * `certificate_file`: Optional path to the certificate file
@@ -4449,13 +4543,16 @@ This setting has the following sub-options:
 
   _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
 
+  _Changed in Synapse 1.116.0: Added password\_path_
+
 Example configuration:
 ```yaml
 redis:
   enabled: true
   host: localhost
   port: 6379
-  password: <secret_password>
+  password_path: <path_to_the_password_file>
+  # OR password: <secret_password>
   dbid: <dbid>
   #use_tls: True
   #certificate_file: <path_to_the_certificate_file>
@@ -4633,7 +4730,9 @@ This setting has the following sub-options:
 * `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
    for direct messages. Defaults to false.
 * `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
-* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
+* `worker_to_run_on`: Which worker to run this module on. This must match 
+  the "worker_name". If not set or `null`, invites will be accepted on the
+  main process.
 
 NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
 The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join
diff --git a/docs/workers.md b/docs/workers.md
index 22fde488a9..0116c455bc 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
 
 You can start the main Synapse process with Poetry by running the following command:
 ```console
-poetry run synapse_homeserver --config-file [your homeserver.yaml]
+poetry run synapse_homeserver --config-path [your homeserver.yaml]
 ```
 For worker setups, you can run the following command
 ```console
-poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
+poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml]
 ```
 ## Available worker applications
 
@@ -290,6 +290,7 @@ information.
 Additionally, the following REST endpoints can be handled for GET requests:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
+    ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
 
 Pagination requests can also be handled, but all requests for a given
 room must be routed to the same instance. Additionally, care must be taken to
@@ -740,6 +741,7 @@ Handles the media repository. It can handle all endpoints starting with:
 
     /_matrix/media/
     /_matrix/client/v1/media/
+    /_matrix/federation/v1/media/
 
 ... and the following regular expressions matching media-specific administration APIs:
 
diff --git a/poetry.lock b/poetry.lock
index 3a322b773e..42d7f03c5c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,14 +1,14 @@
-# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"
-version = "0.5.0"
+version = "0.7.0"
 description = "Reusable constraint types to use with typing.Annotated"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"},
-    {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"},
+    {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+    {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
 ]
 
 [package.dependencies]
@@ -16,32 +16,32 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
 
 [[package]]
 name = "attrs"
-version = "23.2.0"
+version = "24.2.0"
 description = "Classes Without Boilerplate"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
-    {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
+    {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+    {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
 ]
 
 [package.extras]
-cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
-dev = ["attrs[tests]", "pre-commit"]
-docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
-tests = ["attrs[tests-no-zope]", "zope-interface"]
-tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
-tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
 
 [[package]]
 name = "authlib"
-version = "1.3.1"
+version = "1.3.2"
 description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
 optional = true
 python-versions = ">=3.8"
 files = [
-    {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"},
-    {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"},
+    {file = "Authlib-1.3.2-py2.py3-none-any.whl", hash = "sha256:ede026a95e9f5cdc2d4364a52103f5405e75aa156357e831ef2bfd0bc5094dfc"},
+    {file = "authlib-1.3.2.tar.gz", hash = "sha256:4b16130117f9eb82aa6eec97f6dd4673c3f960ac0283ccdae2897ee4bc030ba2"},
 ]
 
 [package.dependencies]
@@ -67,90 +67,44 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
 
 [[package]]
 name = "bcrypt"
-version = "4.1.3"
+version = "4.2.0"
 description = "Modern password hashing for your software and your servers"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"},
-    {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"},
-    {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"},
-    {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"},
-    {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"},
-    {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"},
-    {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"},
-    {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"},
-    {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"},
-    {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"},
-    {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"},
-    {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"},
-    {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"},
-    {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"},
-    {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"},
-    {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"},
-    {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"},
-    {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"},
-    {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"},
-    {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"},
-    {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"},
-    {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"},
-    {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"},
-    {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"},
-    {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"},
-    {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"},
-    {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"},
+    {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"},
+    {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"},
+    {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"},
+    {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"},
+    {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"},
+    {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"},
+    {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"},
+    {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"},
+    {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"},
+    {file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"},
+    {file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"},
+    {file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"},
+    {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"},
+    {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"},
+    {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"},
+    {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"},
+    {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"},
+    {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"},
+    {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"},
+    {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"},
+    {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"},
+    {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"},
+    {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"},
+    {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"},
+    {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"},
+    {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"},
+    {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"},
 ]
 
 [package.extras]
 tests = ["pytest (>=3.2.1,!=3.3.0)"]
 typecheck = ["mypy"]
 
-[[package]]
-name = "black"
-version = "24.4.2"
-description = "The uncompromising code formatter."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"},
-    {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"},
-    {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"},
-    {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"},
-    {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"},
-    {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"},
-    {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"},
-    {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"},
-    {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"},
-    {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"},
-    {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"},
-    {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"},
-    {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"},
-    {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"},
-    {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"},
-    {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"},
-    {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"},
-    {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"},
-    {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"},
-    {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"},
-    {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"},
-    {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"},
-]
-
-[package.dependencies]
-click = ">=8.0.0"
-mypy-extensions = ">=0.4.3"
-packaging = ">=22.0"
-pathspec = ">=0.9.0"
-platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
-
-[package.extras]
-colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
-jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
-uvloop = ["uvloop (>=0.15.2)"]
-
 [[package]]
 name = "bleach"
 version = "6.1.0"
@@ -182,13 +136,13 @@ files = [
 
 [[package]]
 name = "certifi"
-version = "2023.7.22"
+version = "2024.7.4"
 description = "Python package for providing Mozilla's CA Bundle."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
-    {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
 ]
 
 [[package]]
@@ -403,43 +357,38 @@ files = [
 
 [[package]]
 name = "cryptography"
-version = "42.0.8"
+version = "43.0.1"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"},
-    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"},
-    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"},
-    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"},
-    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"},
-    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"},
-    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"},
-    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"},
-    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"},
-    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"},
-    {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"},
-    {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"},
-    {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"},
-    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"},
-    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"},
-    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"},
-    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"},
-    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"},
-    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"},
-    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"},
-    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"},
-    {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"},
-    {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"},
-    {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"},
-    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"},
-    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"},
-    {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"},
-    {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"},
-    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"},
-    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"},
-    {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"},
-    {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"},
+    {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"},
+    {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"},
+    {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"},
+    {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"},
+    {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"},
+    {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"},
+    {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"},
+    {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"},
+    {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"},
+    {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"},
+    {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"},
+    {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"},
+    {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"},
+    {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"},
+    {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"},
+    {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"},
+    {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"},
+    {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"},
+    {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"},
+    {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"},
+    {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"},
+    {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"},
+    {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"},
+    {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"},
+    {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"},
+    {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"},
+    {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"},
 ]
 
 [package.dependencies]
@@ -452,7 +401,7 @@ nox = ["nox"]
 pep8test = ["check-sdist", "click", "mypy", "ruff"]
 sdist = ["build"]
 ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
+test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
 test-randomorder = ["pytest-randomly"]
 
 [[package]]
@@ -542,120 +491,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit",
 
 [[package]]
 name = "hiredis"
-version = "2.3.2"
+version = "3.0.0"
 description = "Python wrapper for hiredis"
 optional = true
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"},
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"},
-    {file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"},
-    {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"},
-    {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"},
-    {file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"},
-    {file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"},
-    {file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"},
-    {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"},
-    {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"},
-    {file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"},
-    {file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"},
-    {file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"},
-    {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"},
-    {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"},
-    {file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"},
-    {file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"},
-    {file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"},
-    {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"},
-    {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"},
-    {file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"},
-    {file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"},
-    {file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"},
-    {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"},
-    {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"},
-    {file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"},
-    {file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"},
-    {file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"},
-    {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"},
-    {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"},
-    {file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"},
-    {file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"},
-    {file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"},
-    {file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"},
-    {file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"},
-    {file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"},
-    {file = "hiredis-2.3.2.tar.gz", hash = "sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"},
+    {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"},
+    {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"},
+    {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"},
+    {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"},
+    {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"},
+    {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"},
+    {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"},
+    {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"},
+    {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"},
+    {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"},
+    {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"},
+    {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"},
+    {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"},
+    {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"},
+    {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"},
+    {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"},
+    {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"},
+    {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"},
+    {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"},
+    {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"},
+    {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"},
+    {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"},
+    {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"},
+    {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"},
+    {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"},
+    {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"},
+    {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"},
+    {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"},
+    {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"},
 ]
 
 [[package]]
@@ -674,111 +608,119 @@ idna = ">=2.5"
 
 [[package]]
 name = "idna"
-version = "3.7"
+version = "3.10"
 description = "Internationalized Domain Names in Applications (IDNA)"
 optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.6"
 files = [
-    {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
-    {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
+    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
 ]
 
+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
+
 [[package]]
 name = "ijson"
-version = "3.2.3"
+version = "3.3.0"
 description = "Iterative JSON parser with standard Python iterator interfaces"
 optional = false
 python-versions = "*"
 files = [
-    {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"},
-    {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"},
-    {file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"},
-    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"},
-    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"},
-    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"},
-    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"},
-    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"},
-    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"},
-    {file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"},
-    {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"},
-    {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"},
-    {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"},
-    {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"},
-    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"},
-    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"},
-    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"},
-    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"},
-    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"},
-    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"},
-    {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"},
-    {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"},
-    {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"},
-    {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"},
-    {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"},
-    {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"},
-    {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"},
-    {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"},
-    {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"},
-    {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"},
-    {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"},
-    {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"},
-    {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"},
-    {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"},
-    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"},
-    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"},
-    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"},
-    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"},
-    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"},
-    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"},
-    {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"},
-    {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"},
-    {file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"},
-    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"},
-    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"},
-    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"},
-    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"},
-    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"},
-    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"},
-    {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"},
-    {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"},
-    {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"},
-    {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"},
-    {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"},
-    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"},
-    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"},
-    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"},
-    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"},
-    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"},
-    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"},
-    {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"},
-    {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"},
-    {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"},
-    {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"},
-    {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"},
-    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"},
-    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"},
-    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"},
-    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"},
-    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"},
-    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"},
-    {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"},
-    {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"},
-    {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"},
-    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"},
-    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"},
-    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"},
-    {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"},
-    {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"},
-    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"},
-    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"},
-    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"},
-    {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"},
-    {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"},
-    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"},
-    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"},
-    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"},
-    {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"},
-    {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"},
+    {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675"},
+    {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34"},
+    {file = "ijson-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b"},
+    {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49"},
+    {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e"},
+    {file = "ijson-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527"},
+    {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3"},
+    {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607"},
+    {file = "ijson-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a"},
+    {file = "ijson-3.3.0-cp310-cp310-win32.whl", hash = "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529"},
+    {file = "ijson-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9"},
+    {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc"},
+    {file = "ijson-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134"},
+    {file = "ijson-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70"},
+    {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b"},
+    {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af"},
+    {file = "ijson-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e"},
+    {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24"},
+    {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51"},
+    {file = "ijson-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe"},
+    {file = "ijson-3.3.0-cp311-cp311-win32.whl", hash = "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea"},
+    {file = "ijson-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42"},
+    {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb"},
+    {file = "ijson-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181"},
+    {file = "ijson-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751"},
+    {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5"},
+    {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c"},
+    {file = "ijson-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb"},
+    {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5"},
+    {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6"},
+    {file = "ijson-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182"},
+    {file = "ijson-3.3.0-cp312-cp312-win32.whl", hash = "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695"},
+    {file = "ijson-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd"},
+    {file = "ijson-3.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b9d85a02e77ee8ea6d9e3fd5d515bcc3d798d9c1ea54817e5feb97a9bc5d52fe"},
+    {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6576cdc36d5a09b0c1a3d81e13a45d41a6763188f9eaae2da2839e8a4240bce"},
+    {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5589225c2da4bb732c9c370c5961c39a6db72cf69fb2a28868a5413ed7f39e6"},
+    {file = "ijson-3.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad04cf38164d983e85f9cba2804566c0160b47086dcca4cf059f7e26c5ace8ca"},
+    {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:a3b730ef664b2ef0e99dec01b6573b9b085c766400af363833e08ebc1e38eb2f"},
+    {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:4690e3af7b134298055993fcbea161598d23b6d3ede11b12dca6815d82d101d5"},
+    {file = "ijson-3.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:aaa6bfc2180c31a45fac35d40e3312a3d09954638ce0b2e9424a88e24d262a13"},
+    {file = "ijson-3.3.0-cp36-cp36m-win32.whl", hash = "sha256:44367090a5a876809eb24943f31e470ba372aaa0d7396b92b953dda953a95d14"},
+    {file = "ijson-3.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7e2b3e9ca957153557d06c50a26abaf0d0d6c0ddf462271854c968277a6b5372"},
+    {file = "ijson-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47c144117e5c0e2babb559bc8f3f76153863b8dd90b2d550c51dab5f4b84a87f"},
+    {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29ce02af5fbf9ba6abb70765e66930aedf73311c7d840478f1ccecac53fefbf3"},
+    {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac6c3eeed25e3e2cb9b379b48196413e40ac4e2239d910bb33e4e7f6c137745"},
+    {file = "ijson-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d92e339c69b585e7b1d857308ad3ca1636b899e4557897ccd91bb9e4a56c965b"},
+    {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:8c85447569041939111b8c7dbf6f8fa7a0eb5b2c4aebb3c3bec0fb50d7025121"},
+    {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:542c1e8fddf082159a5d759ee1412c73e944a9a2412077ed00b303ff796907dc"},
+    {file = "ijson-3.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:30cfea40936afb33b57d24ceaf60d0a2e3d5c1f2335ba2623f21d560737cc730"},
+    {file = "ijson-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:6b661a959226ad0d255e49b77dba1d13782f028589a42dc3172398dd3814c797"},
+    {file = "ijson-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0b003501ee0301dbf07d1597482009295e16d647bb177ce52076c2d5e64113e0"},
+    {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e8d8de44effe2dbd0d8f3eb9840344b2d5b4cc284a14eb8678aec31d1b6bea8"},
+    {file = "ijson-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9cd5c03c63ae06d4f876b9844c5898d0044c7940ff7460db9f4cd984ac7862b5"},
+    {file = "ijson-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04366e7e4a4078d410845e58a2987fd9c45e63df70773d7b6e87ceef771b51ee"},
+    {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de7c1ddb80fa7a3ab045266dca169004b93f284756ad198306533b792774f10a"},
+    {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8851584fb931cffc0caa395f6980525fd5116eab8f73ece9d95e6f9c2c326c4c"},
+    {file = "ijson-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdcfc88347fd981e53c33d832ce4d3e981a0d696b712fbcb45dcc1a43fe65c65"},
+    {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3917b2b3d0dbbe3296505da52b3cb0befbaf76119b2edaff30bd448af20b5400"},
+    {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e10c14535abc7ddf3fd024aa36563cd8ab5d2bb6234a5d22c77c30e30fa4fb2b"},
+    {file = "ijson-3.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3aba5c4f97f4e2ce854b5591a8b0711ca3b0c64d1b253b04ea7b004b0a197ef6"},
+    {file = "ijson-3.3.0-cp38-cp38-win32.whl", hash = "sha256:b325f42e26659df1a0de66fdb5cde8dd48613da9c99c07d04e9fb9e254b7ee1c"},
+    {file = "ijson-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:ff835906f84451e143f31c4ce8ad73d83ef4476b944c2a2da91aec8b649570e1"},
+    {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3c556f5553368dff690c11d0a1fb435d4ff1f84382d904ccc2dc53beb27ba62e"},
+    {file = "ijson-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e4396b55a364a03ff7e71a34828c3ed0c506814dd1f50e16ebed3fc447d5188e"},
+    {file = "ijson-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6850ae33529d1e43791b30575070670070d5fe007c37f5d06aebc1dd152ab3f"},
+    {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36aa56d68ea8def26778eb21576ae13f27b4a47263a7a2581ab2ef58b8de4451"},
+    {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7ec759c4a0fc820ad5dc6a58e9c391e7b16edcb618056baedbedbb9ea3b1524"},
+    {file = "ijson-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b51bab2c4e545dde93cb6d6bb34bf63300b7cd06716f195dd92d9255df728331"},
+    {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:92355f95a0e4da96d4c404aa3cff2ff033f9180a9515f813255e1526551298c1"},
+    {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8795e88adff5aa3c248c1edce932db003d37a623b5787669ccf205c422b91e4a"},
+    {file = "ijson-3.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8f83f553f4cde6d3d4eaf58ec11c939c94a0ec545c5b287461cafb184f4b3a14"},
+    {file = "ijson-3.3.0-cp39-cp39-win32.whl", hash = "sha256:ead50635fb56577c07eff3e557dac39533e0fe603000684eea2af3ed1ad8f941"},
+    {file = "ijson-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:c8a9befb0c0369f0cf5c1b94178d0d78f66d9cebb9265b36be6e4f66236076b8"},
+    {file = "ijson-3.3.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c"},
+    {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb"},
+    {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553"},
+    {file = "ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702"},
+    {file = "ijson-3.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744"},
+    {file = "ijson-3.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:25fd49031cdf5fd5f1fd21cb45259a64dad30b67e64f745cc8926af1c8c243d3"},
+    {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b72178b1e565d06ab19319965022b36ef41bcea7ea153b32ec31194bec032a2"},
+    {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d0b6b637d05dbdb29d0bfac2ed8425bb369e7af5271b0cc7cf8b801cb7360c2"},
+    {file = "ijson-3.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5378d0baa59ae422905c5f182ea0fd74fe7e52a23e3821067a7d58c8306b2191"},
+    {file = "ijson-3.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:99f5c8ab048ee4233cc4f2b461b205cbe01194f6201018174ac269bf09995749"},
+    {file = "ijson-3.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:45ff05de889f3dc3d37a59d02096948ce470699f2368b32113954818b21aa74a"},
+    {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efb521090dd6cefa7aafd120581947b29af1713c902ff54336b7c7130f04c47"},
+    {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c727691858fd3a1c085d9980d12395517fcbbf02c69fbb22dede8ee03422da"},
+    {file = "ijson-3.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0420c24e50389bc251b43c8ed379ab3e3ba065ac8262d98beb6735ab14844460"},
+    {file = "ijson-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8fdf3721a2aa7d96577970f5604bd81f426969c1822d467f07b3d844fa2fecc7"},
+    {file = "ijson-3.3.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:891f95c036df1bc95309951940f8eea8537f102fa65715cdc5aae20b8523813b"},
+    {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed1336a2a6e5c427f419da0154e775834abcbc8ddd703004108121c6dd9eba9d"},
+    {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0c819f83e4f7b7f7463b2dc10d626a8be0c85fbc7b3db0edc098c2b16ac968e"},
+    {file = "ijson-3.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33afc25057377a6a43c892de34d229a86f89ea6c4ca3dd3db0dcd17becae0dbb"},
+    {file = "ijson-3.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7914d0cf083471856e9bc2001102a20f08e82311dfc8cf1a91aa422f9414a0d6"},
+    {file = "ijson-3.3.0.tar.gz", hash = "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0"},
 ]
 
 [[package]]
@@ -831,32 +773,21 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec
 
 [[package]]
 name = "incremental"
-version = "22.10.0"
-description = "\"A small library that versions your Python projects.\""
+version = "24.7.2"
+description = "A small library that versions your Python projects."
 optional = false
-python-versions = "*"
+python-versions = ">=3.8"
 files = [
-    {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"},
-    {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"},
+    {file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"},
+    {file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"},
 ]
 
-[package.extras]
-mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"]
-scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
-
-[[package]]
-name = "isort"
-version = "5.13.2"
-description = "A Python utility / library to sort Python imports."
-optional = false
-python-versions = ">=3.8.0"
-files = [
-    {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
-    {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
-]
+[package.dependencies]
+setuptools = ">=61.0"
+tomli = {version = "*", markers = "python_version < \"3.11\""}
 
 [package.extras]
-colors = ["colorama (>=0.4.6)"]
+scripts = ["click (>=6.0)"]
 
 [[package]]
 name = "jaeger-client"
@@ -929,13 +860,13 @@ i18n = ["Babel (>=2.7)"]
 
 [[package]]
 name = "jsonschema"
-version = "4.22.0"
+version = "4.23.0"
 description = "An implementation of JSON Schema validation for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
-    {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
+    {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
+    {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
 ]
 
 [package.dependencies]
@@ -948,7 +879,7 @@ rpds-py = ">=0.7.1"
 
 [package.extras]
 format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
 
 [[package]]
 name = "jsonschema-specifications"
@@ -1005,153 +936,149 @@ pyasn1 = ">=0.4.6"
 
 [[package]]
 name = "lxml"
-version = "5.2.2"
+version = "5.3.0"
 description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
 optional = true
 python-versions = ">=3.6"
 files = [
-    {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"},
-    {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"},
-    {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"},
-    {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"},
-    {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"},
-    {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"},
-    {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"},
-    {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"},
-    {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"},
-    {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"},
-    {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"},
-    {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"},
-    {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"},
-    {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"},
-    {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"},
-    {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"},
-    {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"},
-    {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"},
-    {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"},
-    {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"},
-    {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"},
-    {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"},
-    {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"},
-    {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"},
-    {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"},
-    {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"},
-    {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"},
-    {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"},
-    {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"},
-    {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"},
-    {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"},
-    {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"},
-    {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"},
-    {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"},
-    {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"},
-    {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"},
-    {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"},
-    {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"},
-    {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"},
-    {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"},
-    {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"},
-    {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"},
-    {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"},
-    {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"},
-    {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"},
-    {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"},
-    {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"},
-    {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"},
-    {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"},
-    {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"},
-    {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"},
-    {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"},
-    {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"},
-    {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"},
-    {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"},
-    {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"},
-    {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"},
-    {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"},
-    {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"},
-    {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"},
-    {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"},
-    {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"},
+    {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
+    {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"},
+    {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"},
+    {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"},
+    {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"},
+    {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"},
+    {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"},
+    {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"},
+    {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"},
+    {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"},
+    {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"},
+    {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"},
+    {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"},
+    {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"},
+    {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"},
+    {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"},
+    {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"},
+    {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"},
+    {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"},
+    {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"},
+    {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"},
+    {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"},
+    {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"},
+    {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"},
+    {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"},
+    {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"},
+    {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"},
+    {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"},
+    {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"},
+    {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"},
+    {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"},
+    {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"},
+    {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"},
+    {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"},
+    {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"},
+    {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"},
+    {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"},
+    {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"},
+    {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"},
+    {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"},
+    {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"},
+    {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"},
+    {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"},
+    {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"},
+    {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"},
+    {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"},
+    {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"},
+    {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"},
+    {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"},
+    {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"},
+    {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"},
+    {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"},
+    {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"},
+    {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"},
+    {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"},
+    {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"},
+    {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"},
+    {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"},
+    {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"},
+    {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"},
+    {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"},
+    {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"},
+    {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"},
+    {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"},
+    {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"},
+    {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"},
+    {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"},
+    {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"},
+    {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"},
+    {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"},
+    {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"},
+    {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"},
+    {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"},
+    {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"},
+    {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"},
+    {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"},
+    {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"},
+    {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"},
 ]
 
 [package.extras]
@@ -1159,7 +1086,7 @@ cssselect = ["cssselect (>=0.7)"]
 html-clean = ["lxml-html-clean"]
 html5 = ["html5lib"]
 htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=3.0.10)"]
+source = ["Cython (>=3.0.11)"]
 
 [[package]]
 name = "lxml-stubs"
@@ -1319,109 +1246,117 @@ files = [
 
 [[package]]
 name = "msgpack"
-version = "1.0.8"
+version = "1.1.0"
 description = "MessagePack serializer"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"},
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"},
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"},
-    {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"},
-    {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"},
-    {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"},
-    {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"},
-    {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"},
-    {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"},
-    {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"},
-    {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
-    {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
-    {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
-    {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
+    {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
+    {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
+    {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"},
+    {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"},
+    {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"},
+    {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"},
+    {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"},
+    {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"},
+    {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"},
+    {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"},
+    {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"},
+    {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"},
+    {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"},
+    {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"},
+    {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"},
+    {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"},
+    {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"},
+    {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"},
+    {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"},
+    {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"},
+    {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"},
+    {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"},
+    {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"},
+    {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"},
+    {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"},
+    {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"},
+    {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"},
+    {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"},
+    {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"},
+    {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"},
+    {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"},
+    {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"},
+    {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"},
+    {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"},
+    {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"},
+    {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"},
+    {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"},
+    {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"},
+    {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"},
+    {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"},
+    {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"},
+    {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"},
+    {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"},
+    {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"},
+    {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"},
+    {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"},
+    {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"},
+    {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"},
+    {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"},
+    {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"},
+    {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"},
+    {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"},
+    {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"},
+    {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"},
+    {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"},
+    {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"},
+    {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"},
+    {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"},
+    {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"},
+    {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"},
+    {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"},
+    {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"},
+    {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"},
+    {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"},
 ]
 
 [[package]]
 name = "mypy"
-version = "1.9.0"
+version = "1.11.2"
 description = "Optional static typing for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
-    {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
-    {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
-    {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
-    {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
-    {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
-    {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
-    {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
-    {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
-    {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
-    {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
-    {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
-    {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
-    {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
-    {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
-    {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
-    {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
-    {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
-    {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
+    {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"},
+    {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"},
+    {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"},
+    {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"},
+    {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"},
+    {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"},
+    {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"},
+    {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"},
+    {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"},
+    {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"},
+    {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"},
+    {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"},
+    {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"},
+    {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"},
+    {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"},
+    {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"},
+    {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"},
+    {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"},
+    {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"},
+    {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"},
+    {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"},
+    {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"},
+    {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"},
+    {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"},
+    {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"},
+    {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"},
+    {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"},
 ]
 
 [package.dependencies]
 mypy-extensions = ">=1.0.0"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=4.1.0"
+typing-extensions = ">=4.6.0"
 
 [package.extras]
 dmypy = ["psutil (>=4.0)"]
@@ -1442,17 +1377,17 @@ files = [
 
 [[package]]
 name = "mypy-zope"
-version = "1.0.4"
+version = "1.0.7"
 description = "Plugin for mypy to support zope interfaces"
 optional = false
 python-versions = "*"
 files = [
-    {file = "mypy-zope-1.0.4.tar.gz", hash = "sha256:a9569e73ae85a65247787d98590fa6d4290e76f26aabe035d1c3e94a0b9ab6ee"},
-    {file = "mypy_zope-1.0.4-py3-none-any.whl", hash = "sha256:c7298f93963a84f2b145c2b5cc98709fc2a5be4adf54bfe23fa7fdd8fd19c975"},
+    {file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
+    {file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
 ]
 
 [package.dependencies]
-mypy = ">=1.0.0,<1.10.0"
+mypy = ">=1.0.0,<1.12.0"
 "zope.interface" = "*"
 "zope.schema" = "*"
 
@@ -1511,108 +1446,108 @@ files = [
 [package.extras]
 dev = ["jinja2"]
 
-[[package]]
-name = "pathspec"
-version = "0.11.1"
-description = "Utility library for gitignore style pattern matching of file paths."
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
-    {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
-]
-
 [[package]]
 name = "phonenumbers"
-version = "8.13.39"
+version = "8.13.47"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 optional = false
 python-versions = "*"
 files = [
-    {file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"},
-    {file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"},
+    {file = "phonenumbers-8.13.47-py2.py3-none-any.whl", hash = "sha256:5d3c0142ef7055ca5551884352e3b6b93bfe002a0bc95b8eaba39b0e2184541b"},
+    {file = "phonenumbers-8.13.47.tar.gz", hash = "sha256:53c5e7c6d431cafe4efdd44956078404ae9bc8b0eacc47be3105d3ccc88aaffa"},
 ]
 
 [[package]]
 name = "pillow"
-version = "10.3.0"
+version = "10.4.0"
 description = "Python Imaging Library (Fork)"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"},
-    {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"},
-    {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"},
-    {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"},
-    {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"},
-    {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"},
-    {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"},
-    {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"},
-    {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"},
-    {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"},
-    {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"},
-    {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"},
-    {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"},
-    {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"},
-    {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"},
-    {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"},
-    {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"},
-    {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"},
-    {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"},
-    {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"},
-    {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"},
-    {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"},
-    {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"},
-    {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"},
-    {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"},
-    {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"},
-    {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"},
-    {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"},
-    {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"},
-    {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"},
-    {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"},
-    {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"},
-    {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"},
-    {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"},
-    {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"},
-    {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"},
-    {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"},
-    {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"},
-    {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"},
-    {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"},
-    {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"},
-    {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"},
-    {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"},
-    {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"},
-    {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"},
-    {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"},
-    {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"},
-    {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"},
-    {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"},
-    {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"},
-    {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"},
-    {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"},
-    {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"},
-    {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"},
-    {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"},
-    {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"},
-    {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"},
+    {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
+    {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
+    {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"},
+    {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"},
+    {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"},
+    {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"},
+    {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"},
+    {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"},
+    {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"},
+    {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"},
+    {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"},
+    {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"},
+    {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"},
+    {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"},
+    {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"},
+    {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"},
+    {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"},
+    {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"},
+    {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"},
+    {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"},
+    {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"},
+    {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"},
+    {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"},
+    {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"},
+    {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"},
+    {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"},
+    {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"},
+    {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"},
+    {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"},
+    {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"},
+    {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"},
+    {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"},
+    {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"},
+    {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"},
+    {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"},
+    {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"},
+    {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"},
+    {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"},
+    {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"},
+    {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"},
+    {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"},
+    {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"},
+    {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"},
+    {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"},
+    {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"},
+    {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"},
+    {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"},
+    {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"},
+    {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"},
+    {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"},
+    {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"},
+    {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"},
+    {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"},
+    {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"},
+    {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"},
+    {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"},
+    {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"},
+    {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"},
+    {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"},
+    {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"},
+    {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"},
+    {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"},
+    {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"},
+    {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"},
+    {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"},
+    {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"},
+    {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"},
+    {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"},
 ]
 
 [package.extras]
-docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
 fpx = ["olefile"]
 mic = ["olefile"]
 tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
@@ -1644,30 +1579,15 @@ files = [
     {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
 ]
 
-[[package]]
-name = "platformdirs"
-version = "3.1.1"
-description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"},
-    {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
-]
-
-[package.extras]
-docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
-
 [[package]]
 name = "prometheus-client"
-version = "0.20.0"
+version = "0.21.0"
 description = "Python client for the Prometheus monitoring system."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"},
-    {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"},
+    {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"},
+    {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"},
 ]
 
 [package.extras]
@@ -1675,24 +1595,20 @@ twisted = ["twisted"]
 
 [[package]]
 name = "psycopg2"
-version = "2.9.9"
+version = "2.9.10"
 description = "psycopg2 - Python-PostgreSQL Database Adapter"
 optional = true
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"},
-    {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
-    {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
-    {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
-    {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
-    {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
-    {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
-    {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
-    {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
-    {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"},
-    {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"},
-    {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"},
-    {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"},
+    {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
+    {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
+    {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"},
+    {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
+    {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
+    {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
+    {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
+    {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
+    {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
 ]
 
 [[package]]
@@ -1724,24 +1640,24 @@ psycopg2 = "*"
 
 [[package]]
 name = "pyasn1"
-version = "0.6.0"
+version = "0.6.1"
 description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"},
-    {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"},
+    {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
+    {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
 ]
 
 [[package]]
 name = "pyasn1-modules"
-version = "0.4.0"
+version = "0.4.1"
 description = "A collection of ASN.1-based protocols modules"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"},
-    {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"},
+    {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
+    {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
 ]
 
 [package.dependencies]
@@ -1760,109 +1676,123 @@ files = [
 
 [[package]]
 name = "pydantic"
-version = "2.7.1"
+version = "2.9.2"
 description = "Data validation using Python type hints"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"},
-    {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"},
+    {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
+    {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
 ]
 
 [package.dependencies]
-annotated-types = ">=0.4.0"
-pydantic-core = "2.18.2"
-typing-extensions = ">=4.6.1"
+annotated-types = ">=0.6.0"
+pydantic-core = "2.23.4"
+typing-extensions = [
+    {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
+    {version = ">=4.6.1", markers = "python_version < \"3.13\""},
+]
 
 [package.extras]
 email = ["email-validator (>=2.0.0)"]
+timezone = ["tzdata"]
 
 [[package]]
 name = "pydantic-core"
-version = "2.18.2"
+version = "2.23.4"
 description = "Core functionality for Pydantic validation and serialization"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"},
-    {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"},
-    {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"},
-    {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"},
-    {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"},
-    {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"},
-    {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"},
-    {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"},
-    {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"},
-    {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"},
-    {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"},
-    {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"},
-    {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"},
-    {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"},
-    {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"},
-    {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"},
-    {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"},
-    {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"},
-    {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"},
-    {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"},
-    {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
+    {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
+    {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
+    {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
+    {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
+    {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
+    {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
+    {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
+    {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
+    {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
+    {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
+    {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
+    {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
+    {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
+    {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
+    {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
+    {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
+    {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
+    {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
+    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
+    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
+    {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
 ]
 
 [package.dependencies]
@@ -1870,13 +1800,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
 
 [[package]]
 name = "pygithub"
-version = "2.3.0"
+version = "2.4.0"
 description = "Use the full Github API v3"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "PyGithub-2.3.0-py3-none-any.whl", hash = "sha256:65b499728be3ce7b0cd2cd760da3b32f0f4d7bc55e5e0677617f90f6564e793e"},
-    {file = "PyGithub-2.3.0.tar.gz", hash = "sha256:0148d7347a1cdeed99af905077010aef81a4dad988b0ba51d4108bf66b443f7e"},
+    {file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"},
+    {file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"},
 ]
 
 [package.dependencies]
@@ -1985,17 +1915,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
 
 [[package]]
 name = "pyopenssl"
-version = "24.1.0"
+version = "24.2.1"
 description = "Python wrapper module around the OpenSSL library"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"},
-    {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"},
+    {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"},
+    {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"},
 ]
 
 [package.dependencies]
-cryptography = ">=41.0.5,<43"
+cryptography = ">=41.0.5,<44"
 
 [package.extras]
 docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]
@@ -2041,18 +1971,15 @@ six = ">=1.5"
 
 [[package]]
 name = "python-multipart"
-version = "0.0.9"
+version = "0.0.12"
 description = "A streaming multipart parser for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"},
-    {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"},
+    {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"},
+    {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"},
 ]
 
-[package.extras]
-dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"]
-
 [[package]]
 name = "pytz"
 version = "2022.7.1"
@@ -2077,62 +2004,64 @@ files = [
 
 [[package]]
 name = "pyyaml"
-version = "6.0.1"
+version = "6.0.2"
 description = "YAML parser and emitter for Python"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
 files = [
-    {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
-    {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
-    {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
-    {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
-    {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
-    {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
-    {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
-    {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
-    {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
-    {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
-    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
-    {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
-    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
-    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
-    {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
-    {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
-    {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
-    {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
-    {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
-    {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
-    {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
-    {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
-    {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
-    {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
-    {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
-    {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
-    {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+    {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+    {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+    {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+    {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+    {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+    {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+    {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+    {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+    {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+    {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+    {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+    {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+    {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+    {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+    {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+    {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+    {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+    {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+    {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+    {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+    {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+    {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+    {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+    {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+    {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
 ]
 
 [[package]]
@@ -2345,29 +2274,29 @@ files = [
 
 [[package]]
 name = "ruff"
-version = "0.5.0"
+version = "0.6.9"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
-    {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
-    {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
-    {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
-    {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
-    {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
-    {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
-    {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
-    {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
+    {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
+    {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
+    {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
+    {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
+    {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
+    {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
+    {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
 ]
 
 [[package]]
@@ -2402,13 +2331,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
 
 [[package]]
 name = "sentry-sdk"
-version = "2.6.0"
+version = "2.17.0"
 description = "Python client for Sentry (https://sentry.io)"
 optional = true
 python-versions = ">=3.6"
 files = [
-    {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"},
-    {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"},
+    {file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"},
+    {file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"},
 ]
 
 [package.dependencies]
@@ -2431,14 +2360,16 @@ falcon = ["falcon (>=1.4)"]
 fastapi = ["fastapi (>=0.79.0)"]
 flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
 grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
+http2 = ["httpcore[http2] (==1.*)"]
 httpx = ["httpx (>=0.16.0)"]
 huey = ["huey (>=2)"]
 huggingface-hub = ["huggingface-hub (>=0.22)"]
 langchain = ["langchain (>=0.0.210)"]
+litestar = ["litestar (>=2.0.0)"]
 loguru = ["loguru (>=0.5)"]
 openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
 opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
-opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
+opentelemetry-experimental = ["opentelemetry-distro"]
 pure-eval = ["asttokens", "executing", "pure-eval"]
 pymongo = ["pymongo (>=3.1)"]
 pyspark = ["pyspark (>=2.4.4)"]
@@ -2448,7 +2379,7 @@ sanic = ["sanic (>=0.8)"]
 sqlalchemy = ["sqlalchemy (>=1.2)"]
 starlette = ["starlette (>=0.19.1)"]
 starlite = ["starlite (>=1.48)"]
-tornado = ["tornado (>=5)"]
+tornado = ["tornado (>=6)"]
 
 [[package]]
 name = "service-identity"
@@ -2476,19 +2407,19 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]
 
 [[package]]
 name = "setuptools"
-version = "67.6.0"
+version = "72.1.0"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
-    {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
+    {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"},
+    {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"},
 ]
 
 [package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
-testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
 
 [[package]]
 name = "setuptools-rust"
@@ -2602,13 +2533,13 @@ twisted = ["twisted"]
 
 [[package]]
 name = "tomli"
-version = "2.0.1"
+version = "2.0.2"
 description = "A lil' TOML parser"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
-    {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+    {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
+    {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
 ]
 
 [[package]]
@@ -2633,34 +2564,34 @@ files = [
 
 [[package]]
 name = "towncrier"
-version = "23.11.0"
+version = "24.8.0"
 description = "Building newsfiles for your project."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"},
-    {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"},
+    {file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"},
+    {file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"},
 ]
 
 [package.dependencies]
 click = "*"
+importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""}
 importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
-incremental = "*"
 jinja2 = "*"
 tomli = {version = "*", markers = "python_version < \"3.11\""}
 
 [package.extras]
-dev = ["furo", "packaging", "sphinx (>=5)", "twisted"]
+dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"]
 
 [[package]]
 name = "treq"
-version = "23.11.0"
+version = "24.9.1"
 description = "High-level Twisted HTTP Client API"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
 files = [
-    {file = "treq-23.11.0-py3-none-any.whl", hash = "sha256:f494c2218d61cab2cabbee37cd6606d3eea9d16cf14190323095c95d22c467e9"},
-    {file = "treq-23.11.0.tar.gz", hash = "sha256:0914ff929fd1632ce16797235260f8bc19d20ff7c459c1deabd65b8c68cbeac5"},
+    {file = "treq-24.9.1-py3-none-any.whl", hash = "sha256:eee4756fd9a857c77f180fd5202b52c518f2d3e2826dce28b89066c03bfc45d0"},
+    {file = "treq-24.9.1.tar.gz", hash = "sha256:15da7fc404f3e4ed59d0abe5f8eef4966fabbe618039a2a23bc7c15305cefea8"},
 ]
 
 [package.dependencies]
@@ -2669,6 +2600,7 @@ hyperlink = ">=21.0.0"
 incremental = "*"
 requests = ">=2.1.0"
 Twisted = {version = ">=22.10.0", extras = ["tls"]}
+typing-extensions = ">=3.10.0"
 
 [package.extras]
 dev = ["httpbin (==0.7.0)", "pep8", "pyflakes", "werkzeug (==2.0.3)"]
@@ -2676,19 +2608,19 @@ docs = ["sphinx (<7.0.0)"]
 
 [[package]]
 name = "twine"
-version = "5.1.0"
+version = "5.1.1"
 description = "Collection of utilities for publishing packages on PyPI"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"},
-    {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"},
+    {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"},
+    {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"},
 ]
 
 [package.dependencies]
 importlib-metadata = ">=3.6"
 keyring = ">=15.1"
-pkginfo = ">=1.8.1"
+pkginfo = ">=1.8.1,<1.11"
 readme-renderer = ">=35.0"
 requests = ">=2.20"
 requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
@@ -2698,13 +2630,13 @@ urllib3 = ">=1.26.0"
 
 [[package]]
 name = "twisted"
-version = "24.3.0"
+version = "24.7.0"
 description = "An asynchronous networking framework written in Python"
 optional = false
 python-versions = ">=3.8.0"
 files = [
-    {file = "twisted-24.3.0-py3-none-any.whl", hash = "sha256:039f2e6a49ab5108abd94de187fa92377abe5985c7a72d68d0ad266ba19eae63"},
-    {file = "twisted-24.3.0.tar.gz", hash = "sha256:6b38b6ece7296b5e122c9eb17da2eeab3d98a198f50ca9efd00fb03e5b4fd4ae"},
+    {file = "twisted-24.7.0-py3-none-any.whl", hash = "sha256:734832ef98108136e222b5230075b1079dad8a3fc5637319615619a7725b0c81"},
+    {file = "twisted-24.7.0.tar.gz", hash = "sha256:5a60147f044187a127ec7da96d170d49bcce50c6fd36f594e60f4587eff4d394"},
 ]
 
 [package.dependencies]
@@ -2713,48 +2645,26 @@ automat = ">=0.8.0"
 constantly = ">=15.1"
 hyperlink = ">=17.1.1"
 idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}
-incremental = ">=22.10.0"
+incremental = ">=24.7.0"
 pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""}
 service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""}
-twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""}
 typing-extensions = ">=4.2.0"
 zope-interface = ">=5"
 
 [package.extras]
-all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"]
+all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
-dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"]
+dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
 dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
-gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"]
+gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"]
-mypy = ["mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
-osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"]
+macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
+mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
+osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
 test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
 tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"]
-
-[[package]]
-name = "twisted-iocpsupport"
-version = "1.0.2"
-description = "An extension for use in the twisted I/O Completion Ports reactor."
-optional = false
-python-versions = "*"
-files = [
-    {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
-    {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"},
-    {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"},
-    {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"},
-    {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"},
-    {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"},
-    {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"},
-    {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"},
-    {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"},
-    {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"},
-    {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"},
-    {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"},
-]
+windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
 
 [[package]]
 name = "txredisapi"
@@ -2823,13 +2733,13 @@ files = [
 
 [[package]]
 name = "types-jsonschema"
-version = "4.22.0.20240610"
+version = "4.23.0.20240813"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"},
-    {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"},
+    {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
+    {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
 ]
 
 [package.dependencies]
@@ -2859,35 +2769,35 @@ files = [
 
 [[package]]
 name = "types-pillow"
-version = "10.2.0.20240520"
+version = "10.2.0.20240822"
 description = "Typing stubs for Pillow"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"},
-    {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"},
+    {file = "types-Pillow-10.2.0.20240822.tar.gz", hash = "sha256:559fb52a2ef991c326e4a0d20accb3bb63a7ba8d40eb493e0ecb0310ba52f0d3"},
+    {file = "types_Pillow-10.2.0.20240822-py3-none-any.whl", hash = "sha256:d9dab025aba07aeb12fd50a6799d4eac52a9603488eca09d7662543983f16c5d"},
 ]
 
 [[package]]
 name = "types-psycopg2"
-version = "2.9.21.20240417"
+version = "2.9.21.20240819"
 description = "Typing stubs for psycopg2"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-psycopg2-2.9.21.20240417.tar.gz", hash = "sha256:05db256f4a459fb21a426b8e7fca0656c3539105ff0208eaf6bdaf406a387087"},
-    {file = "types_psycopg2-2.9.21.20240417-py3-none-any.whl", hash = "sha256:644d6644d64ebbe37203229b00771012fb3b3bddd507a129a2e136485990e4f8"},
+    {file = "types-psycopg2-2.9.21.20240819.tar.gz", hash = "sha256:4ed6b47464d6374fa64e5e3b234cea0f710e72123a4596d67ab50b7415a84666"},
+    {file = "types_psycopg2-2.9.21.20240819-py3-none-any.whl", hash = "sha256:c9192311c27d7ad561eef705f1b2df1074f2cdcf445a98a6a2fcaaaad43278cf"},
 ]
 
 [[package]]
 name = "types-pyopenssl"
-version = "24.1.0.20240425"
+version = "24.1.0.20240722"
 description = "Typing stubs for pyOpenSSL"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-pyOpenSSL-24.1.0.20240425.tar.gz", hash = "sha256:0a7e82626c1983dc8dc59292bf20654a51c3c3881bcbb9b337c1da6e32f0204e"},
-    {file = "types_pyOpenSSL-24.1.0.20240425-py3-none-any.whl", hash = "sha256:f51a156835555dd2a1f025621e8c4fbe7493470331afeef96884d1d29bf3a473"},
+    {file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"},
+    {file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"},
 ]
 
 [package.dependencies]
@@ -2896,24 +2806,24 @@ types-cffi = "*"
 
 [[package]]
 name = "types-pyyaml"
-version = "6.0.12.20240311"
+version = "6.0.12.20240917"
 description = "Typing stubs for PyYAML"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"},
-    {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"},
+    {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"},
+    {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"},
 ]
 
 [[package]]
 name = "types-requests"
-version = "2.31.0.20240406"
+version = "2.32.0.20241016"
 description = "Typing stubs for requests"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"},
-    {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"},
+    {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
+    {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
 ]
 
 [package.dependencies]
@@ -2921,13 +2831,13 @@ urllib3 = ">=2"
 
 [[package]]
 name = "types-setuptools"
-version = "70.1.0.20240627"
+version = "75.1.0.20241014"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"},
-    {file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"},
+    {file = "types-setuptools-75.1.0.20241014.tar.gz", hash = "sha256:29b0560a8d4b4a91174be085847002c69abfcb048e20b33fc663005aedf56804"},
+    {file = "types_setuptools-75.1.0.20241014-py3-none-any.whl", hash = "sha256:caab58366741fb99673d0138b6e2d760717f154cfb981b74fea5e8de40f0b703"},
 ]
 
 [[package]]
@@ -3085,18 +2995,18 @@ docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"]
 
 [[package]]
 name = "zipp"
-version = "3.15.0"
+version = "3.19.1"
 description = "Backport of pathlib-compatible object wrapper for zip files"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
-    {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
+    {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
+    {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
 ]
 
 [package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
 
 [[package]]
 name = "zope-event"
@@ -3202,4 +3112,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.0"
-content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d"
+content-hash = "c8a22f901970b2f851151e731532757fd3acf7ba02930952636d2e6c5c9c0c90"
diff --git a/pylint.cfg b/pylint.cfg
deleted file mode 100644
index 2368997112..0000000000
--- a/pylint.cfg
+++ /dev/null
@@ -1,280 +0,0 @@
-[MASTER]
-
-# Specify a configuration file.
-#rcfile=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Profiled execution.
-profile=no
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-ignore=CVS
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-
-[MESSAGES CONTROL]
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time. See also the "--disable" option for examples.
-#enable=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-disable=missing-docstring
-
-
-[REPORTS]
-
-# Set the output format. Available formats are text, parseable, colorized, msvs
-# (visual studio) and html. You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Put messages in a separate file for each module / package specified on the
-# command line instead of printing them on stdout. Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-files-output=no
-
-# Tells whether to display a full report or only the messages
-reports=yes
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-comment=no
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-
-[TYPECHECK]
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-zope=no
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
-# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO
-
-
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-
-[VARIABLES]
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# A regular expression matching the beginning of the name of dummy variables
-# (i.e. not used).
-dummy-variables-rgx=_$|dummy
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-
-[BASIC]
-
-# Required attributes for module, separated by a comma
-required-attributes=
-
-# List of builtins function names that should not be used, separated by a comma
-bad-functions=map,filter,apply,input
-
-# Regular expression which should only match correct module names
-module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
-# Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-
-# Regular expression which should only match correct class names
-class-rgx=[A-Z_][a-zA-Z0-9]+$
-
-# Regular expression which should only match correct function names
-function-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct instance attribute names
-attr-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct argument names
-argument-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct variable names
-variable-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct attribute names in class
-# bodies
-class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-
-# Regular expression which should only match correct list comprehension /
-# generator expression variable names
-inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,Run,_
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,bar,baz,toto,tutu,tata
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=__.*__
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-max-line-length=80
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-# List of optional constructs for which whitespace checking is disabled
-no-space-check=trailing-comma,dict-separator
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=5
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*
-
-# Maximum number of locals for function / method body
-max-locals=15
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of branch for function / method body
-max-branches=12
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-
-[IMPORTS]
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,TERMIOS,Bastion,rexec
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-
-[CLASSES]
-
-# List of interface methods to ignore, separated by a comma. This is used for
-# instance to not check methods defines in Zope's Interface base class.
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/pyproject.toml b/pyproject.toml
index 2d1481f263..658771a89a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,14 +34,9 @@
         name = "Internal Changes"
         showcontent = true
 
-[tool.black]
-target-version = ['py38', 'py39', 'py310', 'py311']
-# black ignores everything in .gitignore by default, see
-# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
-# Use `extend-exclude` if you want to exclude something in addition to this.
-
 [tool.ruff]
 line-length = 88
+target-version = "py38"
 
 [tool.ruff.lint]
 # See https://beta.ruff.rs/docs/rules/#error-e
@@ -63,6 +58,8 @@ select = [
     "W",
     # pyflakes
     "F",
+    # isort
+    "I001",
     # flake8-bugbear
     "B0",
     # flake8-comprehensions
@@ -79,17 +76,20 @@ select = [
     "EXE",
 ]
 
-[tool.isort]
-line_length = 88
-sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"]
-default_section = "THIRDPARTY"
-known_first_party = ["synapse"]
-known_tests = ["tests"]
-known_twisted = ["twisted", "OpenSSL"]
-multi_line_output = 3
-include_trailing_comma = true
-combine_as_imports = true
-skip_gitignore = true
+[tool.ruff.lint.isort]
+combine-as-imports = true
+section-order = ["future", "standard-library", "third-party", "twisted", "first-party", "testing", "local-folder"]
+known-first-party = ["synapse"]
+
+[tool.ruff.lint.isort.sections]
+twisted = ["twisted", "OpenSSL"]
+testing = ["tests"]
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
 
 [tool.maturin]
 manifest-path = "rust/Cargo.toml"
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.110.0"
+version = "1.117.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -201,8 +201,8 @@ netaddr = ">=0.7.18"
 # add a lower bound to the Jinja2 dependency.
 Jinja2 = ">=3.0"
 bleach = ">=1.4.3"
-# We use `Self`, which were added in `typing-extensions` 4.0.
-typing-extensions = ">=4.0"
+# We use `assert_never`, which was added in `typing-extensions` 4.1.
+typing-extensions = ">=4.1"
 # We enforce that we have a `cryptography` version that bundles an `openssl`
 # with the latest security patches.
 cryptography = ">=3.4.7"
@@ -320,9 +320,7 @@ all = [
 # failing on new releases. Keeping lower bounds loose here means that dependabot
 # can bump versions without having to update the content-hash in the lockfile.
 # This helps prevents merge conflicts when running a batch of dependabot updates.
-isort = ">=5.10.1"
-black = ">=22.7.0"
-ruff = "0.5.0"
+ruff = "0.6.9"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
 
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
index 4fea035b96..28537e187e 100644
--- a/rust/benches/evaluator.rs
+++ b/rust/benches/evaluator.rs
@@ -60,6 +60,7 @@ fn bench_match_exact(b: &mut Bencher) {
         true,
         vec![],
         false,
+        false,
     )
     .unwrap();
 
@@ -105,6 +106,7 @@ fn bench_match_word(b: &mut Bencher) {
         true,
         vec![],
         false,
+        false,
     )
     .unwrap();
 
@@ -150,6 +152,7 @@ fn bench_match_word_miss(b: &mut Bencher) {
         true,
         vec![],
         false,
+        false,
     )
     .unwrap();
 
@@ -195,6 +198,7 @@ fn bench_eval_message(b: &mut Bencher) {
         true,
         vec![],
         false,
+        false,
     )
     .unwrap();
 
@@ -205,6 +209,7 @@ fn bench_eval_message(b: &mut Bencher) {
         false,
         false,
         false,
+        false,
     );
 
     b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 74f02d6001..e0832ada1c 100644
--- a/rust/src/push/base_rules.rs
+++ b/rust/src/push/base_rules.rs
@@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         ))]),
         actions: Cow::Borrowed(&[Action::Notify]),
         default: true,
-        default_enabled: false,
+        default_enabled: true,
     },
     PushRule {
         rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 2f4b6d47bb..0d436a1d7b 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -105,6 +105,9 @@ pub struct PushRuleEvaluator {
     /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
     /// flag as MSC1767 (extensible events core).
     msc3931_enabled: bool,
+
+    /// If MSC4210 (remove legacy mentions) is enabled.
+    msc4210_enabled: bool,
 }
 
 #[pymethods]
@@ -122,6 +125,7 @@ impl PushRuleEvaluator {
         related_event_match_enabled,
         room_version_feature_flags,
         msc3931_enabled,
+        msc4210_enabled,
     ))]
     pub fn py_new(
         flattened_keys: BTreeMap<String, JsonValue>,
@@ -133,6 +137,7 @@ impl PushRuleEvaluator {
         related_event_match_enabled: bool,
         room_version_feature_flags: Vec<String>,
         msc3931_enabled: bool,
+        msc4210_enabled: bool,
     ) -> Result<Self, Error> {
         let body = match flattened_keys.get("content.body") {
             Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
@@ -150,6 +155,7 @@ impl PushRuleEvaluator {
             related_event_match_enabled,
             room_version_feature_flags,
             msc3931_enabled,
+            msc4210_enabled,
         })
     }
 
@@ -176,7 +182,8 @@ impl PushRuleEvaluator {
 
             // For backwards-compatibility the legacy mention rules are disabled
             // if the event contains the 'm.mentions' property.
-            if self.has_mentions
+            // Additionally, MSC4210 always disables the legacy rules.
+            if (self.has_mentions || self.msc4210_enabled)
                 && (rule_id == "global/override/.m.rule.contains_display_name"
                     || rule_id == "global/content/.m.rule.contains_user_name"
                     || rule_id == "global/override/.m.rule.roomnotif")
@@ -526,6 +533,7 @@ fn push_rule_evaluator() {
         true,
         vec![],
         true,
+        false,
     )
     .unwrap();
 
@@ -555,6 +563,7 @@ fn test_requires_room_version_supports_condition() {
         false,
         flags,
         true,
+        false,
     )
     .unwrap();
 
@@ -582,7 +591,7 @@ fn test_requires_room_version_supports_condition() {
     };
     let rules = PushRules::new(vec![custom_rule]);
     result = evaluator.run(
-        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
+        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
         None,
         None,
     );
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index 2a452b69a3..ef8ed150d4 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -534,6 +534,7 @@ pub struct FilteredPushRules {
     msc3381_polls_enabled: bool,
     msc3664_enabled: bool,
     msc4028_push_encrypted_events: bool,
+    msc4210_enabled: bool,
 }
 
 #[pymethods]
@@ -546,6 +547,7 @@ impl FilteredPushRules {
         msc3381_polls_enabled: bool,
         msc3664_enabled: bool,
         msc4028_push_encrypted_events: bool,
+        msc4210_enabled: bool,
     ) -> Self {
         Self {
             push_rules,
@@ -554,6 +556,7 @@ impl FilteredPushRules {
             msc3381_polls_enabled,
             msc3664_enabled,
             msc4028_push_encrypted_events,
+            msc4210_enabled,
         }
     }
 
@@ -596,6 +599,14 @@ impl FilteredPushRules {
                     return false;
                 }
 
+                if self.msc4210_enabled
+                    && (rule.rule_id == "global/override/.m.rule.contains_display_name"
+                        || rule.rule_id == "global/content/.m.rule.contains_user_name"
+                        || rule.rule_id == "global/override/.m.rule.roomnotif")
+                {
+                    return false;
+                }
+
                 true
             })
             .map(|r| {
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index de2a134544..88c8419400 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -32,8 +32,8 @@ DISTS = (
     "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
     "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
-    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
-    "ubuntu:mantic",  # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24)
+    "ubuntu:noble",  # 24.04 LTS (EOL 2029-06)
+    "ubuntu:oracular",  # 24.10 (EOL 2025-07)
     "debian:trixie",  # (EOL not specified yet)
 )
 
diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py
index 9e67375b6a..5eb1f0a9df 100755
--- a/scripts-dev/check_pydantic_models.py
+++ b/scripts-dev/check_pydantic_models.py
@@ -31,6 +31,7 @@ Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. Se
 until then, this script is a best effort to stop us from introducing type coersion bugs
 (like the infamous stringy power levels fixed in room version 10).
 """
+
 import argparse
 import contextlib
 import functools
@@ -44,7 +45,6 @@ import traceback
 import unittest.mock
 from contextlib import contextmanager
 from typing import (
-    TYPE_CHECKING,
     Any,
     Callable,
     Dict,
@@ -56,30 +56,17 @@ from typing import (
 )
 
 from parameterized import parameterized
-
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import (
-        BaseModel as PydanticBaseModel,
-        conbytes,
-        confloat,
-        conint,
-        constr,
-    )
-    from pydantic.v1.typing import get_args
-else:
-    from pydantic import (
-        BaseModel as PydanticBaseModel,
-        conbytes,
-        confloat,
-        conint,
-        constr,
-    )
-    from pydantic.typing import get_args
-
 from typing_extensions import ParamSpec
 
+from synapse._pydantic_compat import (
+    BaseModel as PydanticBaseModel,
+    conbytes,
+    confloat,
+    conint,
+    constr,
+    get_args,
+)
+
 logger = logging.getLogger(__name__)
 
 CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [
@@ -182,22 +169,16 @@ def monkeypatch_pydantic() -> Generator[None, None, None]:
         # Most Synapse code ought to import the patched objects directly from
         # `pydantic`. But we also patch their containing modules `pydantic.main` and
         # `pydantic.types` for completeness.
-        patch_basemodel1 = unittest.mock.patch(
-            "pydantic.BaseModel", new=PatchedBaseModel
+        patch_basemodel = unittest.mock.patch(
+            "synapse._pydantic_compat.BaseModel", new=PatchedBaseModel
         )
-        patch_basemodel2 = unittest.mock.patch(
-            "pydantic.main.BaseModel", new=PatchedBaseModel
-        )
-        patches.enter_context(patch_basemodel1)
-        patches.enter_context(patch_basemodel2)
+        patches.enter_context(patch_basemodel)
         for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG:
             wrapper: Callable = make_wrapper(factory)
-            patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper)
-            patch2 = unittest.mock.patch(
-                f"pydantic.types.{factory.__name__}", new=wrapper
+            patch = unittest.mock.patch(
+                f"synapse._pydantic_compat.{factory.__name__}", new=wrapper
             )
-            patches.enter_context(patch1)
-            patches.enter_context(patch2)
+            patches.enter_context(patch)
         yield
 
 
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 4ad547bc7e..b6dcb96e2c 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -220,9 +220,11 @@ test_packages=(
     ./tests/msc3874
     ./tests/msc3890
     ./tests/msc3391
+    ./tests/msc3757
     ./tests/msc3930
     ./tests/msc3902
     ./tests/msc3967
+    ./tests/msc4140
 )
 
 # Enable dirty runs, so tests will reuse the same container where possible.
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 4c758e5424..fb879ef555 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -43,7 +43,7 @@ import argparse
 import base64
 import json
 import sys
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Dict, Mapping, Optional, Tuple, Union
 from urllib import parse as urlparse
 
 import requests
@@ -75,7 +75,7 @@ def encode_canonical_json(value: object) -> bytes:
         value,
         # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
         ensure_ascii=False,
-        # Remove unecessary white space.
+        # Remove unnecessary white space.
         separators=(",", ":"),
         # Sort the keys of dictionaries.
         sort_keys=True,
@@ -298,12 +298,23 @@ class MatrixConnectionAdapter(HTTPAdapter):
 
         return super().send(request, *args, **kwargs)
 
-    def get_connection(
-        self, url: str, proxies: Optional[Dict[str, str]] = None
+    def get_connection_with_tls_context(
+        self,
+        request: PreparedRequest,
+        verify: Optional[Union[bool, str]],
+        proxies: Optional[Mapping[str, str]] = None,
+        cert: Optional[Union[Tuple[str, str], str]] = None,
     ) -> HTTPConnectionPool:
-        # overrides the get_connection() method in the base class
-        parsed = urlparse.urlsplit(url)
-        (host, port, ssl_server_name) = self._lookup(parsed.netloc)
+        # overrides the get_connection_with_tls_context() method in the base class
+        parsed = urlparse.urlsplit(request.url)
+
+        # Extract the server name from the request URL, and ensure it's a str.
+        hostname = parsed.netloc
+        if isinstance(hostname, bytes):
+            hostname = hostname.decode("utf-8")
+        assert isinstance(hostname, str)
+
+        (host, port, ssl_server_name) = self._lookup(hostname)
         print(
             f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
         )
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 8acf0a6fb8..c656047729 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -1,8 +1,9 @@
 #!/usr/bin/env bash
 #
 # Runs linting scripts over the local Synapse checkout
-# black - opinionated code formatter
 # ruff - lints and finds mistakes
+# mypy - typechecks Python code
+# cargo clippy - lints Rust code
 
 set -e
 
@@ -101,12 +102,6 @@ echo
 # Print out the commands being run
 set -x
 
-# Ensure the sort order of imports.
-isort "${files[@]}"
-
-# Ensure Python code conforms to an opinionated style.
-python3 -m black "${files[@]}"
-
 # Ensure the sample configuration file conforms to style checks.
 ./scripts-dev/config-lint.sh
 
@@ -114,6 +109,9 @@ python3 -m black "${files[@]}"
 # --quiet suppresses the update check.
 ruff check --quiet --fix "${files[@]}"
 
+# Reformat Python code.
+ruff format --quiet "${files[@]}"
+
 # Catch any common programming mistakes in Rust code.
 #
 # --bins, --examples, --lib, --tests combined explicitly disable checking
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 877b831751..a15c3c005c 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -38,6 +38,7 @@ from mypy.types import (
     NoneType,
     TupleType,
     TypeAliasType,
+    TypeVarType,
     UninhabitedType,
     UnionType,
 )
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
     "synapse.synapse_rust.push.FilteredPushRules",
     # This is technically not immutable, but close enough.
     "signedjson.types.VerifyKey",
+    "synapse.types.StrCollection",
 }
 
 # Immutable containers only if the values are also immutable.
@@ -298,7 +300,7 @@ def is_cacheable(
 
         elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
             # Mutable containers are mutable regardless of their underlying type.
-            return False, None
+            return False, f"container {rt.type.fullname} is mutable"
 
         elif "attrs" in rt.type.metadata:
             # attrs classes are only cachable iff it is frozen (immutable itself)
@@ -318,6 +320,9 @@ def is_cacheable(
             else:
                 return False, "non-frozen attrs class"
 
+        elif rt.type.is_enum:
+            # We assume Enum values are immutable
+            return True, None
         else:
             # Ensure we fail for unknown types, these generally means that the
             # above code is not complete.
@@ -326,6 +331,18 @@ def is_cacheable(
                 f"Don't know how to handle {rt.type.fullname} return type instance",
             )
 
+    elif isinstance(rt, TypeVarType):
+        # We consider TypeVars immutable if they are bound to a set of immutable
+        # types.
+        if rt.values:
+            for value in rt.values:
+                ok, note = is_cacheable(value, signature, verbose)
+                if not ok:
+                    return False, f"TypeVar bound not cacheable {value}"
+            return True, None
+
+        return False, "TypeVar is unbound"
+
     elif isinstance(rt, NoneType):
         # None is cachable.
         return True, None
@@ -343,7 +360,7 @@ def is_cacheable(
         # For a type alias, check if the underlying real type is cachable.
         return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)
 
-    elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
+    elif isinstance(rt, UninhabitedType):
         # There is no return value, just consider it cachable. This is only used
         # in tests.
         return True, None
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 5e519bb758..b14b61c705 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -20,8 +20,7 @@
 #
 #
 
-"""An interactive script for doing a release. See `cli()` below.
-"""
+"""An interactive script for doing a release. See `cli()` below."""
 
 import glob
 import json
@@ -41,7 +40,7 @@ import commonmark
 import git
 from click.exceptions import ClickException
 from git import GitCommandError, Repo
-from github import Github
+from github import BadCredentialsException, Github
 from packaging import version
 
 
@@ -324,6 +323,9 @@ def tag(gh_token: Optional[str]) -> None:
 def _tag(gh_token: Optional[str]) -> None:
     """Tags the release and generates a draft GitHub release"""
 
+    # Test that the GH Token is valid before continuing.
+    check_valid_gh_token(gh_token)
+
     # Make sure we're in a git repo.
     repo = get_repo_and_check_clean_checkout()
 
@@ -418,6 +420,11 @@ def publish(gh_token: str) -> None:
 def _publish(gh_token: str) -> None:
     """Publish release on GitHub."""
 
+    if gh_token:
+        # Test that the GH Token is valid before continuing.
+        gh = Github(gh_token)
+        gh.get_user()
+
     # Make sure we're in a git repo.
     get_repo_and_check_clean_checkout()
 
@@ -460,6 +467,9 @@ def upload(gh_token: Optional[str]) -> None:
 def _upload(gh_token: Optional[str]) -> None:
     """Upload release to pypi."""
 
+    # Test that the GH Token is valid before continuing.
+    check_valid_gh_token(gh_token)
+
     current_version = get_package_version()
     tag_name = f"v{current_version}"
 
@@ -555,6 +565,9 @@ def wait_for_actions(gh_token: Optional[str]) -> None:
 
 
 def _wait_for_actions(gh_token: Optional[str]) -> None:
+    # Test that the GH Token is valid before continuing.
+    check_valid_gh_token(gh_token)
+
     # Find out the version and tag name.
     current_version = get_package_version()
     tag_name = f"v{current_version}"
@@ -711,6 +724,11 @@ Ask the designated people to do the blog and tweets."""
 @cli.command()
 @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
 def full(gh_token: str) -> None:
+    if gh_token:
+        # Test that the GH Token is valid before continuing.
+        gh = Github(gh_token)
+        gh.get_user()
+
     click.echo("1. If this is a security release, read the security wiki page.")
     click.echo("2. Check for any release blockers before proceeding.")
     click.echo("    https://github.com/element-hq/synapse/labels/X-Release-Blocker")
@@ -782,6 +800,22 @@ def get_repo_and_check_clean_checkout(
     return repo
 
 
+def check_valid_gh_token(gh_token: Optional[str]) -> None:
+    """Check that a github token is valid, if supplied"""
+
+    if not gh_token:
+        # No github token supplied, so nothing to do.
+        return
+
+    try:
+        gh = Github(gh_token)
+
+        # We need to look up the name to trigger a request.
+        _name = gh.get_user().name
+    except BadCredentialsException as e:
+        raise click.ClickException(f"Github credentials are bad: {e}")
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
     if ref_name in repo.references:
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index a141218d3d..c9a4114b1e 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -13,8 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Contains *incomplete* type hints for txredisapi.
-"""
+"""Contains *incomplete* type hints for txredisapi."""
+
 from typing import Any, List, Optional, Type, Union
 
 from twisted.internet import protocol
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 99ed7a5374..73b92f12be 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -20,8 +20,7 @@
 #
 #
 
-""" This is an implementation of a Matrix homeserver.
-"""
+"""This is an implementation of a Matrix homeserver."""
 
 import os
 import sys
diff --git a/synapse/_pydantic_compat.py b/synapse/_pydantic_compat.py
index a6ceeb04d2..f0eedf5c6d 100644
--- a/synapse/_pydantic_compat.py
+++ b/synapse/_pydantic_compat.py
@@ -19,6 +19,8 @@
 #
 #
 
+from typing import TYPE_CHECKING
+
 from packaging.version import Version
 
 try:
@@ -30,4 +32,64 @@ except ImportError:
 
 HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2
 
-__all__ = ("HAS_PYDANTIC_V2",)
+if TYPE_CHECKING or HAS_PYDANTIC_V2:
+    from pydantic.v1 import (
+        BaseModel,
+        Extra,
+        Field,
+        MissingError,
+        PydanticValueError,
+        StrictBool,
+        StrictInt,
+        StrictStr,
+        ValidationError,
+        conbytes,
+        confloat,
+        conint,
+        constr,
+        parse_obj_as,
+        validator,
+    )
+    from pydantic.v1.error_wrappers import ErrorWrapper
+    from pydantic.v1.typing import get_args
+else:
+    from pydantic import (
+        BaseModel,
+        Extra,
+        Field,
+        MissingError,
+        PydanticValueError,
+        StrictBool,
+        StrictInt,
+        StrictStr,
+        ValidationError,
+        conbytes,
+        confloat,
+        conint,
+        constr,
+        parse_obj_as,
+        validator,
+    )
+    from pydantic.error_wrappers import ErrorWrapper
+    from pydantic.typing import get_args
+
+__all__ = (
+    "HAS_PYDANTIC_V2",
+    "BaseModel",
+    "constr",
+    "conbytes",
+    "conint",
+    "confloat",
+    "ErrorWrapper",
+    "Extra",
+    "Field",
+    "get_args",
+    "MissingError",
+    "parse_obj_as",
+    "PydanticValueError",
+    "StrictBool",
+    "StrictInt",
+    "StrictStr",
+    "ValidationError",
+    "validator",
+)
diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py
index 715c7ddc17..09feb8cf30 100755
--- a/synapse/_scripts/generate_workers_map.py
+++ b/synapse/_scripts/generate_workers_map.py
@@ -171,7 +171,7 @@ def elide_http_methods_if_unconflicting(
     """
 
     def paths_to_methods_dict(
-        methods_and_paths: Iterable[Tuple[str, str]]
+        methods_and_paths: Iterable[Tuple[str, str]],
     ) -> Dict[str, Set[str]]:
         """
         Given (method, path) pairs, produces a dict from path to set of methods
@@ -201,7 +201,7 @@ def elide_http_methods_if_unconflicting(
 
 
 def simplify_path_regexes(
-    registrations: Dict[Tuple[str, str], EndpointDescription]
+    registrations: Dict[Tuple[str, str], EndpointDescription],
 ) -> Dict[Tuple[str, str], EndpointDescription]:
     """
     Simplify all the path regexes for the dict of endpoint descriptions,
diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py
index 3bed367be2..2b7d3585cb 100755
--- a/synapse/_scripts/hash_password.py
+++ b/synapse/_scripts/hash_password.py
@@ -56,7 +56,9 @@ def main() -> None:
     password_pepper = password_config.get("pepper", password_pepper)
     password = args.password
 
-    if not password:
+    if not password and not sys.stdin.isatty():
+        password = sys.stdin.readline().strip()
+    elif not password:
         password = prompt_for_pass()
 
     # On Python 2, make sure we decode it to Unicode before we normalise it
diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py
index ad88df477a..62723c539d 100644
--- a/synapse/_scripts/review_recent_signups.py
+++ b/synapse/_scripts/review_recent_signups.py
@@ -40,6 +40,7 @@ from synapse.storage.engines import create_engine
 
 class ReviewConfig(RootConfig):
     "A config class that just pulls out the database config"
+
     config_classes = [DatabaseConfig]
 
 
@@ -160,7 +161,11 @@ def main() -> None:
 
     with make_conn(database_config, engine, "review_recent_signups") as db_conn:
         # This generates a type of Cursor, not LoggingTransaction.
-        user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice)  # type: ignore[arg-type]
+        user_infos = get_recent_users(
+            db_conn.cursor(),
+            since_ms,  # type: ignore[arg-type]
+            exclude_users_with_appservice,
+        )
 
     for user_info in user_infos:
         if exclude_users_with_email and user_info.emails:
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 3bb4a34938..31639d366e 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -119,18 +119,24 @@ BOOLEAN_COLUMNS = {
     "e2e_room_keys": ["is_verified"],
     "event_edges": ["is_state"],
     "events": ["processed", "outlier", "contains_url"],
-    "local_media_repository": ["safe_from_quarantine"],
+    "local_media_repository": ["safe_from_quarantine", "authenticated"],
+    "per_user_experimental_features": ["enabled"],
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
     "public_room_list_stream": ["visibility"],
     "pushers": ["enabled"],
     "redactions": ["have_censored"],
+    "remote_media_cache": ["authenticated"],
     "room_stats_state": ["is_federatable"],
     "rooms": ["is_public", "has_auth_chain_index"],
+    "sliding_sync_joined_rooms": ["is_encrypted"],
+    "sliding_sync_membership_snapshots": [
+        "has_known_state",
+        "is_encrypted",
+    ],
     "users": ["shadow_banned", "approved", "locked", "suspended"],
     "un_partial_stated_event_stream": ["rejection_status_changed"],
     "users_who_share_rooms": ["share_private"],
-    "per_user_experimental_features": ["enabled"],
 }
 
 
@@ -711,9 +717,7 @@ class Porter:
                 return
 
             # Check if all background updates are done, abort if not.
-            updates_complete = (
-                await self.sqlite_store.db_pool.updates.has_completed_background_updates()
-            )
+            updates_complete = await self.sqlite_store.db_pool.updates.has_completed_background_updates()
             if not updates_complete:
                 end_error = (
                     "Pending background updates exist in the SQLite3 database."
@@ -1089,10 +1093,10 @@ class Porter:
         return done, remaining + done
 
     async def _setup_state_group_id_seq(self) -> None:
-        curr_id: Optional[int] = (
-            await self.sqlite_store.db_pool.simple_select_one_onecol(
-                table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
-            )
+        curr_id: Optional[
+            int
+        ] = await self.sqlite_store.db_pool.simple_select_one_onecol(
+            table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
         )
 
         if not curr_id:
@@ -1180,13 +1184,13 @@ class Porter:
         )
 
     async def _setup_auth_chain_sequence(self) -> None:
-        curr_chain_id: Optional[int] = (
-            await self.sqlite_store.db_pool.simple_select_one_onecol(
-                table="event_auth_chains",
-                keyvalues={},
-                retcol="MAX(chain_id)",
-                allow_none=True,
-            )
+        curr_chain_id: Optional[
+            int
+        ] = await self.sqlite_store.db_pool.simple_select_one_onecol(
+            table="event_auth_chains",
+            keyvalues={},
+            retcol="MAX(chain_id)",
+            allow_none=True,
         )
 
         def r(txn: LoggingTransaction) -> None:
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
index f61b39ded7..53907c01d4 100644
--- a/synapse/api/auth/msc3861_delegated.py
+++ b/synapse/api/auth/msc3861_delegated.py
@@ -121,7 +121,9 @@ class MSC3861DelegatedAuth(BaseAuth):
         self._hostname = hs.hostname
         self._admin_token = self._config.admin_token
 
-        self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata)
+        self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata](
+            self._load_metadata
+        )
 
         if isinstance(auth_method, PrivateKeyJWTWithKid):
             # Use the JWK as the client secret when using the private_key_jwt method
@@ -145,6 +147,45 @@ class MSC3861DelegatedAuth(BaseAuth):
         # metadata.validate_introspection_endpoint()
         return metadata
 
+    async def issuer(self) -> str:
+        """
+        Get the configured issuer
+
+        This will use the issuer value set in the metadata,
+        falling back to the one set in the config if not set in the metadata
+        """
+        metadata = await self._issuer_metadata.get()
+        return metadata.issuer or self._config.issuer
+
+    async def account_management_url(self) -> Optional[str]:
+        """
+        Get the configured account management URL
+
+        This will discover the account management URL from the issuer if it's not set in the config
+        """
+        if self._config.account_management_url is not None:
+            return self._config.account_management_url
+
+        try:
+            metadata = await self._issuer_metadata.get()
+            return metadata.get("account_management_uri", None)
+        # We don't want to raise here if we can't load the metadata
+        except Exception:
+            logger.warning("Failed to load metadata:", exc_info=True)
+            return None
+
+    async def _introspection_endpoint(self) -> str:
+        """
+        Returns the introspection endpoint of the issuer
+
+        It uses the config option if set; otherwise it uses OIDC discovery to get it.
+        """
+        if self._config.introspection_endpoint is not None:
+            return self._config.introspection_endpoint
+
+        metadata = await self._issuer_metadata.get()
+        return metadata.get("introspection_endpoint")
+
     async def _introspect_token(self, token: str) -> IntrospectionToken:
         """
         Send a token to the introspection endpoint and returns the introspection response
@@ -161,8 +202,7 @@ class MSC3861DelegatedAuth(BaseAuth):
         Returns:
             The introspection response
         """
-        metadata = await self._issuer_metadata.get()
-        introspection_endpoint = metadata.get("introspection_endpoint")
+        introspection_endpoint = await self._introspection_endpoint()
         raw_headers: Dict[str, str] = {
             "Content-Type": "application/x-www-form-urlencoded",
             "User-Agent": str(self._http_client.user_agent, "utf-8"),
@@ -298,7 +338,7 @@ class MSC3861DelegatedAuth(BaseAuth):
             logger.exception("Failed to introspect token")
             raise SynapseError(503, "Unable to introspect the access token")
 
-        logger.info(f"Introspection result: {introspection_result!r}")
+        logger.debug("Introspection result: %r", introspection_result)
 
         # TODO: introspection verification should be more extensive, especially:
         #   - verify the audience
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 9265a271d2..8db302b3d8 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -50,7 +50,7 @@ class Membership:
     KNOCK: Final = "knock"
     LEAVE: Final = "leave"
     BAN: Final = "ban"
-    LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
+    LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN))
 
 
 class PresenceState:
@@ -128,9 +128,13 @@ class EventTypes:
     SpaceParent: Final = "m.space.parent"
 
     Reaction: Final = "m.reaction"
+    Sticker: Final = "m.sticker"
+    LiveLocationShareStart: Final = "m.beacon_info"
 
     CallInvite: Final = "m.call.invite"
 
+    PollStart: Final = "m.poll.start"
+
 
 class ToDeviceEventTypes:
     RoomKeyRequest: Final = "m.room_key_request"
@@ -221,6 +225,13 @@ class EventContentFields:
     # This is deprecated in MSC2175.
     ROOM_CREATOR: Final = "creator"
 
+    # The version of the room for `m.room.create` events.
+    ROOM_VERSION: Final = "room_version"
+
+    ROOM_NAME: Final = "name"
+
+    MEMBERSHIP: Final = "membership"
+
     # Used in m.room.guest_access events.
     GUEST_ACCESS: Final = "guest_access"
 
@@ -233,6 +244,11 @@ class EventContentFields:
     # an unspecced field added to to-device messages to identify them uniquely-ish
     TO_DEVICE_MSGID: Final = "org.matrix.msgid"
 
+    # `m.room.encryption` algorithm field
+    ENCRYPTION_ALGORITHM: Final = "algorithm"
+
+    TOMBSTONE_SUCCESSOR_ROOM: Final = "replacement_room"
+
 
 class EventUnsignedContentFields:
     """Fields found inside the 'unsigned' data on events"""
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index dd4a1ae706..e6efa7a424 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -128,6 +128,10 @@ class Codes(str, Enum):
     # MSC2677
     DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
 
+    # MSC3575: we are telling the client they need to expire their sliding sync
+    # connection.
+    UNKNOWN_POS = "M_UNKNOWN_POS"
+
 
 class CodeMessageException(RuntimeError):
     """An exception with integer code, a message string attributes and optional headers.
@@ -847,3 +851,17 @@ class PartialStateConflictError(SynapseError):
             msg=PartialStateConflictError.message(),
             errcode=Codes.UNKNOWN,
         )
+
+
+class SlidingSyncUnknownPosition(SynapseError):
+    """An error that Synapse can return to signal to the client to expire their
+    sliding sync connection (i.e. send a new request without a `?since=`
+    param).
+    """
+
+    def __init__(self) -> None:
+        super().__init__(
+            HTTPStatus.BAD_REQUEST,
+            msg="Unknown position",
+            errcode=Codes.UNKNOWN_POS,
+        )
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 26b8711851..b80630c5d3 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -236,9 +236,8 @@ class Ratelimiter:
             requester: The requester that is doing the action, if any.
             key: An arbitrary key used to classify an action. Defaults to the
                 requester's user ID.
-            n_actions: The number of times the user wants to do this action. If the user
-                cannot do all of the actions, the user's action count is not incremented
-                at all.
+            n_actions: The number of times the user performed the action. May be negative
+                to "refund" the rate limit.
             _time_now_s: The current time. Optional, defaults to the current time according
                 to self.clock. Only used by tests.
         """
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index fbc1d58ecb..4bde385f78 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -107,6 +107,8 @@ class RoomVersion:
     # support the flag. Unknown flags are ignored by the evaluator, making conditions
     # fail if used.
     msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag
+    # MSC3757: Restricting who can overwrite a state event
+    msc3757_enabled: bool
 
 
 class RoomVersions:
@@ -128,6 +130,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V2 = RoomVersion(
         "2",
@@ -147,6 +150,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V3 = RoomVersion(
         "3",
@@ -166,6 +170,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V4 = RoomVersion(
         "4",
@@ -185,6 +190,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V5 = RoomVersion(
         "5",
@@ -204,6 +210,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V6 = RoomVersion(
         "6",
@@ -223,6 +230,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V7 = RoomVersion(
         "7",
@@ -242,6 +250,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V8 = RoomVersion(
         "8",
@@ -261,6 +270,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V9 = RoomVersion(
         "9",
@@ -280,6 +290,7 @@ class RoomVersions:
         knock_restricted_join_rule=False,
         enforce_int_power_levels=False,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     V10 = RoomVersion(
         "10",
@@ -299,6 +310,7 @@ class RoomVersions:
         knock_restricted_join_rule=True,
         enforce_int_power_levels=True,
         msc3931_push_features=(),
+        msc3757_enabled=False,
     )
     MSC1767v10 = RoomVersion(
         # MSC1767 (Extensible Events) based on room version "10"
@@ -319,6 +331,28 @@ class RoomVersions:
         knock_restricted_join_rule=True,
         enforce_int_power_levels=True,
         msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
+        msc3757_enabled=False,
+    )
+    MSC3757v10 = RoomVersion(
+        # MSC3757 (Restricting who can overwrite a state event) based on room version "10"
+        "org.matrix.msc3757.10",
+        RoomDisposition.UNSTABLE,
+        EventFormatVersions.ROOM_V4_PLUS,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        implicit_room_creator=False,
+        updated_redaction_rules=False,
+        restricted_join_rule=True,
+        restricted_join_rule_fix=True,
+        knock_join_rule=True,
+        msc3389_relation_redactions=False,
+        knock_restricted_join_rule=True,
+        enforce_int_power_levels=True,
+        msc3931_push_features=(),
+        msc3757_enabled=True,
     )
     V11 = RoomVersion(
         "11",
@@ -338,6 +372,28 @@ class RoomVersions:
         knock_restricted_join_rule=True,
         enforce_int_power_levels=True,
         msc3931_push_features=(),
+        msc3757_enabled=False,
+    )
+    MSC3757v11 = RoomVersion(
+        # MSC3757 (Restricting who can overwrite a state event) based on room version "11"
+        "org.matrix.msc3757.11",
+        RoomDisposition.UNSTABLE,
+        EventFormatVersions.ROOM_V4_PLUS,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        implicit_room_creator=True,  # Used by MSC3820
+        updated_redaction_rules=True,  # Used by MSC3820
+        restricted_join_rule=True,
+        restricted_join_rule_fix=True,
+        knock_join_rule=True,
+        msc3389_relation_redactions=False,
+        knock_restricted_join_rule=True,
+        enforce_int_power_levels=True,
+        msc3931_push_features=(),
+        msc3757_enabled=True,
     )
 
 
@@ -355,6 +411,8 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V9,
         RoomVersions.V10,
         RoomVersions.V11,
+        RoomVersions.MSC3757v10,
+        RoomVersions.MSC3757v11,
     )
 }
 
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index d077a2c613..03a3e96f28 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -19,7 +19,8 @@
 #
 #
 
-"""Contains the URL paths to prefix various aspects of the server with. """
+"""Contains the URL paths to prefix various aspects of the server with."""
+
 import hmac
 from hashlib import sha256
 from urllib.parse import urlencode
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 599f95466b..a528c3890d 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -3,7 +3,7 @@
 #
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 # Copyright 2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -65,6 +65,7 @@ from synapse.storage.databases.main.appservice import (
 )
 from synapse.storage.databases.main.censor_events import CensorEventsStore
 from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
+from synapse.storage.databases.main.delayed_events import DelayedEventsStore
 from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
 from synapse.storage.databases.main.devices import DeviceWorkerStore
 from synapse.storage.databases.main.directory import DirectoryWorkerStore
@@ -74,6 +75,9 @@ from synapse.storage.databases.main.event_push_actions import (
     EventPushActionsWorkerStore,
 )
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.experimental_features import (
+    ExperimentalFeaturesStore,
+)
 from synapse.storage.databases.main.filtering import FilteringWorkerStore
 from synapse.storage.databases.main.keys import KeyStore
 from synapse.storage.databases.main.lock import LockStore
@@ -95,6 +99,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.databases.main.search import SearchStore
 from synapse.storage.databases.main.session import SessionStore
 from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
 from synapse.storage.databases.main.state import StateGroupWorkerStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.databases.main.stream import StreamWorkerStore
@@ -155,6 +160,9 @@ class GenericWorkerStore(
     LockStore,
     SessionStore,
     TaskSchedulerWorkerStore,
+    ExperimentalFeaturesStore,
+    SlidingSyncStore,
+    DelayedEventsStore,
 ):
     # Properties that multiple storage classes define. Tell mypy what the
     # expected type is.
@@ -202,6 +210,21 @@ class GenericWorkerServer(HomeServer):
                                 "/_synapse/admin": admin_resource,
                             }
                         )
+
+                        if "federation" not in res.names:
+                            # Only load the federation media resource separately if federation
+                            # resource is not specified since federation resource includes media
+                            # resource.
+                            resources[FEDERATION_PREFIX] = TransportLayerServer(
+                                self, servlet_groups=["media"]
+                            )
+                        if "client" not in res.names:
+                            # Only load the client media resource separately if client
+                            # resource is not specified since client resource includes media
+                            # resource.
+                            resources[CLIENT_API_PREFIX] = ClientRestResource(
+                                self, servlet_groups=["media"]
+                            )
                     else:
                         logger.warning(
                             "A 'media' listener is configured but the media"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 2b111847b7..2a824e8457 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -101,6 +101,12 @@ class SynapseHomeServer(HomeServer):
                     # Skip loading openid resource if federation is defined
                     # since federation resource will include openid
                     continue
+                if name == "media" and (
+                    "federation" in res.names or "client" in res.names
+                ):
+                    # Skip loading media resource if federation or client are defined
+                    # since federation & client resources will include media
+                    continue
                 if name == "health":
                     # Skip loading, health resource is always included
                     continue
@@ -217,7 +223,7 @@ class SynapseHomeServer(HomeServer):
             )
 
         if name in ["media", "federation", "client"]:
-            if self.config.server.enable_media_repo:
+            if self.config.media.can_load_media_repo:
                 media_repo = self.get_media_repository_resource()
                 resources.update(
                     {
@@ -231,6 +237,14 @@ class SynapseHomeServer(HomeServer):
                     "'media' resource conflicts with enable_media_repo=False"
                 )
 
+        if name == "media":
+            resources[FEDERATION_PREFIX] = TransportLayerServer(
+                self, servlet_groups=["media"]
+            )
+            resources[CLIENT_API_PREFIX] = ClientRestResource(
+                self, servlet_groups=["media"]
+            )
+
         if name in ["keys", "federation"]:
             resources[SERVER_KEY_PREFIX] = KeyResource(self)
 
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index bec83419a2..7994da0868 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -54,6 +54,7 @@ UP & quit           +---------- YES                       SUCCESS
 This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
+
 import logging
 from typing import (
     TYPE_CHECKING,
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index 32b906a1ec..731b60a840 100644
--- a/synapse/config/_util.py
+++ b/synapse/config/_util.py
@@ -18,17 +18,11 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
+from typing import Any, Dict, Type, TypeVar
 
 import jsonschema
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, ValidationError, parse_obj_as
-else:
-    from pydantic import BaseModel, ValidationError, parse_obj_as
-
+from synapse._pydantic_compat import BaseModel, ValidationError, parse_obj_as
 from synapse.config._base import ConfigError
 from synapse.types import JsonDict, StrSequence
 
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 1b72727b75..fd14db0d02 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -140,6 +140,12 @@ class MSC3861:
                 ("experimental", "msc3861", "client_auth_method"),
             )
 
+    introspection_endpoint: Optional[str] = attr.ib(
+        default=None,
+        validator=attr.validators.optional(attr.validators.instance_of(str)),
+    )
+    """The URL of the introspection endpoint used to validate access tokens."""
+
     account_management_url: Optional[str] = attr.ib(
         default=None,
         validator=attr.validators.optional(attr.validators.instance_of(str)),
@@ -332,8 +338,10 @@ class ExperimentalConfig(Config):
         # MSC3391: Removing account data.
         self.msc3391_enabled = experimental.get("msc3391_enabled", False)
 
-        # MSC3575 (Sliding Sync API endpoints)
-        self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)
+        # MSC3575 (Sliding Sync) alternate endpoints, c.f. MSC4186.
+        #
+        # This is enabled by default as a replacement for the sliding sync proxy.
+        self.msc3575_enabled: bool = experimental.get("msc3575_enabled", True)
 
         # MSC3773: Thread notifications
         self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
@@ -437,12 +445,8 @@ class ExperimentalConfig(Config):
             "msc3823_account_suspension", False
         )
 
-        self.msc3916_authenticated_media_enabled = experimental.get(
-            "msc3916_authenticated_media_enabled", False
-        )
-
         # MSC4151: Report room API (Client-Server API)
         self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
 
-        # MSC4156: Migrate server_name to via
-        self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)
+        # MSC4210: Remove legacy mentions
+        self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py
index b41f2dc08f..5c76551f33 100644
--- a/synapse/config/jwt.py
+++ b/synapse/config/jwt.py
@@ -38,6 +38,7 @@ class JWTConfig(Config):
             self.jwt_algorithm = jwt_config["algorithm"]
 
             self.jwt_subject_claim = jwt_config.get("subject_claim", "sub")
+            self.jwt_display_name_claim = jwt_config.get("display_name_claim")
 
             # The issuer and audiences are optional, if provided, it is asserted
             # that the claims exist on the JWT.
@@ -49,5 +50,6 @@ class JWTConfig(Config):
             self.jwt_secret = None
             self.jwt_algorithm = None
             self.jwt_subject_claim = None
+            self.jwt_display_name_claim = None
             self.jwt_issuer = None
             self.jwt_audiences = None
diff --git a/synapse/config/key.py b/synapse/config/key.py
index b9925a52d2..bc96888967 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -200,16 +200,13 @@ class KeyConfig(Config):
             )
             form_secret = 'form_secret: "%s"' % random_string_with_symbols(50)
 
-        return (
-            """\
+        return """\
         %(macaroon_secret_key)s
         %(form_secret)s
         signing_key_path: "%(base_key_name)s.signing.key"
         trusted_key_servers:
           - server_name: "matrix.org"
-        """
-            % locals()
-        )
+        """ % locals()
 
     def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]:
         """Read the signing keys in the given path.
@@ -249,7 +246,9 @@ class KeyConfig(Config):
             if is_signing_algorithm_supported(key_id):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
-                verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes)  # type: ignore[assignment]
+                verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(
+                    key_id, key_bytes
+                )  # type: ignore[assignment]
                 verify_key.expired = key_data["expired_ts"]
                 keys[key_id] = verify_key
             else:
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index fca0b08d6d..cfc1a57107 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -157,12 +157,9 @@ class LoggingConfig(Config):
         self, config_dir_path: str, server_name: str, **kwargs: Any
     ) -> str:
         log_config = os.path.join(config_dir_path, server_name + ".log.config")
-        return (
-            """\
+        return """\
         log_config: "%(log_config)s"
-        """
-            % locals()
-        )
+        """ % locals()
 
     def read_arguments(self, args: argparse.Namespace) -> None:
         if args.no_redirect_stdio is not None:
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index f140538088..3f38fa11b0 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -21,10 +21,15 @@
 
 from typing import Any
 
-from synapse.config._base import Config
+from synapse.config._base import Config, ConfigError, read_file
 from synapse.types import JsonDict
 from synapse.util.check_dependencies import check_requirements
 
+CONFLICTING_PASSWORD_OPTS_ERROR = """\
+You have configured both `redis.password` and `redis.password_path`.
+These are mutually incompatible.
+"""
+
 
 class RedisConfig(Config):
     section = "redis"
@@ -43,6 +48,17 @@ class RedisConfig(Config):
         self.redis_path = redis_config.get("path", None)
         self.redis_dbid = redis_config.get("dbid", None)
         self.redis_password = redis_config.get("password")
+        redis_password_path = redis_config.get("password_path")
+        if redis_password_path:
+            if self.redis_password:
+                raise ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR)
+            self.redis_password = read_file(
+                redis_password_path,
+                (
+                    "redis",
+                    "password_path",
+                ),
+            ).strip()
 
         self.redis_use_tls = redis_config.get("use_tls", False)
         self.redis_certificate = redis_config.get("certificate_file", None)
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 1645470499..97ce6de528 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
         # Only enable the media repo if either the media repo is enabled or the
         # current worker app is the media repo.
         if (
-            self.root.server.enable_media_repo is False
+            config.get("enable_media_repo", True) is False
             and config.get("worker_app") != "synapse.app.media_repository"
         ):
             self.can_load_media_repo = False
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
                 remote_media_lifetime
             )
 
+        self.enable_authenticated_media = config.get(
+            "enable_authenticated_media", False
+        )
+
     def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
         assert data_dir_path is not None
         media_store = os.path.join(data_dir_path, "media_store")
diff --git a/synapse/config/server.py b/synapse/config/server.py
index a2b2305776..6a8c7cb1c9 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -2,7 +2,7 @@
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -384,6 +384,11 @@ class ServerConfig(Config):
         # Whether to internally track presence, requires that presence is enabled,
         self.track_presence = self.presence_enabled and presence_enabled != "untracked"
 
+        # Determines if presence results for offline users are included on initial/full sync
+        self.presence_include_offline_users_on_sync = presence_config.get(
+            "include_offline_users_on_sync", False
+        )
+
         # Custom presence router module
         # This is the legacy way of configuring it (the config should now be put in the modules section)
         self.presence_router_module_class = None
@@ -395,12 +400,6 @@ class ServerConfig(Config):
                 self.presence_router_config,
             ) = load_module(presence_router_config, ("presence", "presence_router"))
 
-        # whether to enable the media repository endpoints. This should be set
-        # to false if the media repository is running as a separate endpoint;
-        # doing so ensures that we will not run cache cleanup jobs on the
-        # master, potentially causing inconsistency.
-        self.enable_media_repo = config.get("enable_media_repo", True)
-
         # Whether to require authentication to retrieve profile data (avatars,
         # display names) of other users through the client API.
         self.require_auth_for_profile_requests = config.get(
@@ -781,6 +780,17 @@ class ServerConfig(Config):
         else:
             self.delete_stale_devices_after = None
 
+        # The maximum allowed delay duration for delayed events (MSC4140).
+        max_event_delay_duration = config.get("max_event_delay_duration")
+        if max_event_delay_duration is not None:
+            self.max_event_delay_ms: Optional[int] = self.parse_duration(
+                max_event_delay_duration
+            )
+            if self.max_event_delay_ms <= 0:
+                raise ConfigError("max_event_delay_duration must be a positive value")
+        else:
+            self.max_event_delay_ms = None
+
     def has_tls_listener(self) -> bool:
         return any(listener.is_tls() for listener in self.listeners)
 
@@ -829,13 +839,10 @@ class ServerConfig(Config):
             ).lstrip()
 
         if not unsecure_listeners:
-            unsecure_http_bindings = (
-                """- port: %(unsecure_port)s
+            unsecure_http_bindings = """- port: %(unsecure_port)s
             tls: false
             type: http
-            x_forwarded: true"""
-                % locals()
-            )
+            x_forwarded: true""" % locals()
 
             if not open_private_ports:
                 unsecure_http_bindings += (
@@ -854,16 +861,13 @@ class ServerConfig(Config):
         if not secure_listeners:
             secure_http_bindings = ""
 
-        return (
-            """\
+        return """\
         server_name: "%(server_name)s"
         pid_file: %(pid_file)s
         listeners:
           %(secure_http_bindings)s
           %(unsecure_http_bindings)s
-        """
-            % locals()
-        )
+        """ % locals()
 
     def read_arguments(self, args: argparse.Namespace) -> None:
         if args.manhole is not None:
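The new `max_event_delay_duration` option is run through the usual duration parser and must be strictly positive; leaving it unset leaves `max_event_delay_ms` as `None`, i.e. no limit. A minimal sketch of that validation, using plain millisecond integers in place of Synapse's duration strings (which `parse_duration` also accepts):

```python
from typing import Optional


def read_max_event_delay_ms(raw_ms: Optional[int]) -> Optional[int]:
    # None means no maximum delay is enforced for delayed events.
    if raw_ms is None:
        return None
    if raw_ms <= 0:
        raise ValueError("max_event_delay_duration must be a positive value")
    return raw_ms
```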
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index 6fe43a9e32..8614a41dd4 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -23,7 +23,12 @@ from typing import Any
 
 from synapse.types import JsonDict
 
-from ._base import Config
+from ._base import Config, ConfigError, read_file
+
+CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
+You have configured both `turn_shared_secret` and `turn_shared_secret_path`.
+These are mutually incompatible.
+"""
 
 
 class VoipConfig(Config):
@@ -32,6 +37,13 @@ class VoipConfig(Config):
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.turn_uris = config.get("turn_uris", [])
         self.turn_shared_secret = config.get("turn_shared_secret")
+        turn_shared_secret_path = config.get("turn_shared_secret_path")
+        if turn_shared_secret_path:
+            if self.turn_shared_secret:
+                raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR)
+            self.turn_shared_secret = read_file(
+                turn_shared_secret_path, ("turn_shared_secret_path",)
+            ).strip()
         self.turn_username = config.get("turn_username")
         self.turn_password = config.get("turn_password")
         self.turn_user_lifetime = self.parse_duration(
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 7ecf349e4a..ab896be307 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -22,17 +22,17 @@
 
 import argparse
 import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import attr
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, Extra, StrictBool, StrictInt, StrictStr
-else:
-    from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
-
+from synapse._pydantic_compat import (
+    BaseModel,
+    Extra,
+    StrictBool,
+    StrictInt,
+    StrictStr,
+)
 from synapse.config._base import (
     Config,
     ConfigError,
@@ -328,10 +328,11 @@ class WorkerConfig(Config):
                 )
 
         # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
-        self.instance_map: Dict[
-            str, InstanceLocationConfig
-        ] = parse_and_validate_mapping(
-            instance_map, InstanceLocationConfig  # type: ignore[arg-type]
+        self.instance_map: Dict[str, InstanceLocationConfig] = (
+            parse_and_validate_mapping(
+                instance_map,
+                InstanceLocationConfig,  # type: ignore[arg-type]
+            )
         )
 
         # Map from type of streams to source, c.f. WriterLocations.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 8c301e077c..643d2d4e66 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -589,7 +589,7 @@ class BaseV2KeyFetcher(KeyFetcher):
                 % (server_name,)
             )
 
-        for key_id, key_data in response_json["old_verify_keys"].items():
+        for key_id, key_data in response_json.get("old_verify_keys", {}).items():
             if is_signing_algorithm_supported(key_id):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index f5abcde2db..c208b900c5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -388,6 +388,7 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
     RoomVersions.V9,
     RoomVersions.V10,
     RoomVersions.MSC1767v10,
+    RoomVersions.MSC3757v10,
 }
 
 
@@ -790,9 +791,10 @@ def get_send_level(
 
 
 def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool:
+    state_key = event.get_state_key()
     power_levels_event = get_power_level_event(auth_events)
 
-    send_level = get_send_level(event.type, event.get("state_key"), power_levels_event)
+    send_level = get_send_level(event.type, state_key, power_levels_event)
     user_level = get_user_power_level(event.user_id, auth_events)
 
     if user_level < send_level:
@@ -803,11 +805,34 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
             errcode=Codes.INSUFFICIENT_POWER,
         )
 
-    # Check state_key
-    if hasattr(event, "state_key"):
-        if event.state_key.startswith("@"):
-            if event.state_key != event.user_id:
-                raise AuthError(403, "You are not allowed to set others state")
+    if (
+        state_key is not None
+        and state_key.startswith("@")
+        and state_key != event.user_id
+    ):
+        if event.room_version.msc3757_enabled:
+            try:
+                colon_idx = state_key.index(":", 1)
+                suffix_idx = state_key.find("_", colon_idx + 1)
+                state_key_user_id = (
+                    state_key[:suffix_idx] if suffix_idx != -1 else state_key
+                )
+                if not UserID.is_valid(state_key_user_id):
+                    raise ValueError
+            except ValueError:
+                raise SynapseError(
+                    400,
+                    "State key neither equals a valid user ID, nor starts with one plus an underscore",
+                    errcode=Codes.BAD_JSON,
+                )
+            if (
+                # sender is owner of the state key
+                state_key_user_id == event.user_id
+                # sender has higher PL than the owner of the state key
+                or user_level > get_user_power_level(state_key_user_id, auth_events)
+            ):
+                return True
+        raise AuthError(403, "You are not allowed to set others state")
 
     return True
 
@@ -887,7 +912,8 @@ def _check_power_levels(
                     raise SynapseError(400, f"{v!r} must be an integer.")
             if k in {"events", "notifications", "users"}:
                 if not isinstance(v, collections.abc.Mapping) or not all(
-                    type(v) is int for v in v.values()  # noqa: E721
+                    type(v) is int
+                    for v in v.values()  # noqa: E721
                 ):
                     raise SynapseError(
                         400,
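The MSC3757 branch above works out who "owns" a state key: the key must start with a valid user ID, optionally followed by an underscore and a free-form suffix after the server name. A small illustrative sketch of just the owner-extraction step, mirroring the `index`/`find` logic in the diff (the `UserID.is_valid` check is omitted here):

```python
def state_key_owner(state_key: str) -> str:
    # The owner is the leading user ID; anything after "_" past the server name
    # is treated as a free-form suffix (e.g. a device identifier).
    colon_idx = state_key.index(":", 1)
    suffix_idx = state_key.find("_", colon_idx + 1)
    return state_key[:suffix_idx] if suffix_idx != -1 else state_key


assert state_key_owner("@alice:example.com") == "@alice:example.com"
assert state_key_owner("@alice:example.com_lamp1") == "@alice:example.com"
```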
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 36e0f47e51..2e56b671f0 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -554,3 +554,22 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
             aggregation_key = None
 
     return _EventRelation(parent_id, rel_type, aggregation_key)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class StrippedStateEvent:
+    """
+    A stripped down state event. Usually used for remote invite/knocks so the user can
+    make an informed decision on whether they want to join.
+
+    Attributes:
+        type: Event `type`
+        state_key: Event `state_key`
+        sender: Event `sender`
+        content: Event `content`
+    """
+
+    type: str
+    state_key: str
+    sender: str
+    content: Dict[str, Any]
diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py
index 9cb053cd8e..9713b141bc 100644
--- a/synapse/events/presence_router.py
+++ b/synapse/events/presence_router.py
@@ -80,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
     # All methods that the module provides should be async, but this wasn't enforced
     # in the old module system, so we wrap them if needed
     def async_wrapper(
-        f: Optional[Callable[P, R]]
+        f: Optional[Callable[P, R]],
     ) -> Optional[Callable[P, Awaitable[R]]]:
         # f might be None if the callback isn't implemented by the module. In this
         # case we don't want to register a callback at all so we return None.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 6b70ea94d1..dd21a6136b 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -504,7 +504,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
 
 
 def _encode_state_group_delta(
-    state_group_delta: Dict[Tuple[int, int], StateMap[str]]
+    state_group_delta: Dict[Tuple[int, int], StateMap[str]],
 ) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]:
     if not state_group_delta:
         return []
@@ -517,7 +517,7 @@ def _encode_state_group_delta(
 
 
 def _decode_state_group_delta(
-    input: List[Tuple[int, int, List[Tuple[str, str, str]]]]
+    input: List[Tuple[int, int, List[Tuple[str, str, str]]]],
 ) -> Dict[Tuple[int, int], StateMap[str]]:
     if not input:
         return {}
@@ -544,7 +544,7 @@ def _encode_state_dict(
 
 
 def _decode_state_dict(
-    input: Optional[List[Tuple[str, str, str]]]
+    input: Optional[List[Tuple[str, str, str]]],
 ) -> Optional[StateMap[str]]:
     """Decodes a state dict encoded using `_encode_state_dict` above"""
     if input is None:
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f937fd4698..54f94add4d 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
 from synapse.types import JsonDict, Requester
 
-from . import EventBase, make_event_from_dict
+from . import EventBase, StrippedStateEvent, make_event_from_dict
 
 if TYPE_CHECKING:
     from synapse.handlers.relations import BundledAggregations
@@ -854,3 +854,30 @@ def strip_event(event: EventBase) -> JsonDict:
         "content": event.content,
         "sender": event.sender,
     }
+
+
+def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]:
+    """
+    Given a raw value from an event's `unsigned` field, attempt to parse it into a
+    `StrippedStateEvent`.
+    """
+    if isinstance(raw_stripped_event, dict):
+        # All of these fields are required
+        type = raw_stripped_event.get("type")
+        state_key = raw_stripped_event.get("state_key")
+        sender = raw_stripped_event.get("sender")
+        content = raw_stripped_event.get("content")
+        if (
+            isinstance(type, str)
+            and isinstance(state_key, str)
+            and isinstance(sender, str)
+            and isinstance(content, dict)
+        ):
+            return StrippedStateEvent(
+                type=type,
+                state_key=state_key,
+                sender=sender,
+                content=content,
+            )
+
+    return None
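`parse_stripped_state_event` is deliberately lenient: any input that is not a dict carrying string `type`/`state_key`/`sender` fields and a dict `content` yields `None` rather than an error. A hedged usage sketch, assuming `synapse.events.utils` is importable in the environment:

```python
from synapse.events.utils import parse_stripped_state_event

raw = {
    "type": "m.room.name",
    "state_key": "",
    "sender": "@alice:example.com",
    "content": {"name": "Kitchen"},
}
parsed = parse_stripped_state_event(raw)
assert parsed is not None and parsed.content["name"] == "Kitchen"
# Missing required fields (or a non-dict) yield None instead of raising.
assert parse_stripped_state_event({"type": "m.room.name"}) is None
assert parse_stripped_state_event("not a dict") is None
```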
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 73b63b77f2..8aa8d7e017 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -19,17 +19,11 @@
 #
 #
 import collections.abc
-from typing import TYPE_CHECKING, List, Type, Union, cast
+from typing import List, Type, Union, cast
 
 import jsonschema
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import Field, StrictBool, StrictStr
-else:
-    from pydantic import Field, StrictBool, StrictStr
-
+from synapse._pydantic_compat import Field, StrictBool, StrictStr
 from synapse.api.constants import (
     MAX_ALIAS_LENGTH,
     EventContentFields,
diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py
index a571eff590..61e28bff66 100644
--- a/synapse/federation/__init__.py
+++ b/synapse/federation/__init__.py
@@ -19,5 +19,4 @@
 #
 #
 
-""" This package includes all the federation specific logic.
-"""
+"""This package includes all the federation specific logic."""
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 0bfde00315..8340b48503 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -20,7 +20,7 @@
 #
 #
 
-""" This module contains all the persistence actions done by the federation
+"""This module contains all the persistence actions done by the federation
 package.
 
 These actions are mostly only used by the :py:mod:`.replication` module.
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index b435588da0..d097e65ea7 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -322,6 +322,7 @@ class PerDestinationQueue:
         )
 
     async def _transaction_transmission_loop(self) -> None:
+        pending_pdus: List[EventBase] = []
         try:
             self.transmission_loop_running = True
 
@@ -341,7 +342,7 @@ class PerDestinationQueue:
                 self._new_data_to_send = False
 
                 async with _TransactionQueueManager(self) as (
-                    pending_pdus,
+                    pending_pdus,  # noqa: F811
                     pending_edus,
                 ):
                     if not pending_pdus and not pending_edus:
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index c44e5daa47..43102567db 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -33,6 +33,7 @@ from synapse.federation.transport.server.federation import (
     FEDERATION_SERVLET_CLASSES,
     FederationAccountStatusServlet,
     FederationMediaDownloadServlet,
+    FederationMediaThumbnailServlet,
     FederationUnstableClientKeysClaimServlet,
 )
 from synapse.http.server import HttpServer, JsonResource
@@ -270,6 +271,10 @@ SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "federation": FEDERATION_SERVLET_CLASSES,
     "room_list": (PublicRoomList,),
     "openid": (OpenIdUserInfo,),
+    "media": (
+        FederationMediaDownloadServlet,
+        FederationMediaThumbnailServlet,
+    ),
 }
 
 
@@ -316,8 +321,11 @@ def register_servlets(
             ):
                 continue
 
-            if servletclass == FederationMediaDownloadServlet:
-                if not hs.config.server.enable_media_repo:
+            if (
+                servletclass == FederationMediaDownloadServlet
+                or servletclass == FederationMediaThumbnailServlet
+            ):
+                if not hs.config.media.can_load_media_repo:
                     continue
 
             servletclass(
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index e124481474..9094201da0 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -363,6 +363,8 @@ class BaseFederationServlet:
                             if (
                                 func.__self__.__class__.__name__  # type: ignore
                                 == "FederationMediaDownloadServlet"
+                                or func.__self__.__class__.__name__  # type: ignore
+                                == "FederationMediaThumbnailServlet"
                             ):
                                 response = await func(
                                     origin, content, request, *args, **kwargs
@@ -375,6 +377,8 @@ class BaseFederationServlet:
                         if (
                             func.__self__.__class__.__name__  # type: ignore
                             == "FederationMediaDownloadServlet"
+                            or func.__self__.__class__.__name__  # type: ignore
+                            == "FederationMediaThumbnailServlet"
                         ):
                             response = await func(
                                 origin, content, request, *args, **kwargs
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index ec957768d4..a05e5d5319 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -46,11 +46,13 @@ from synapse.http.servlet import (
     parse_boolean_from_args,
     parse_integer,
     parse_integer_from_args,
+    parse_string,
     parse_string_from_args,
     parse_strings_from_args,
 )
 from synapse.http.site import SynapseRequest
 from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
+from synapse.media.thumbnailer import ThumbnailProvider
 from synapse.types import JsonDict
 from synapse.util import SYNAPSE_VERSION
 from synapse.util.ratelimitutils import FederationRateLimiter
@@ -826,6 +828,58 @@ class FederationMediaDownloadServlet(BaseFederationServerServlet):
         )
 
 
+class FederationMediaThumbnailServlet(BaseFederationServerServlet):
+    """
+    Implementation of the new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
+    a multipart/mixed response consisting of a JSON object and the requested media
+    item. This endpoint only returns local media.
+    """
+
+    PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
+    RATELIMIT = True
+
+    def __init__(
+        self,
+        hs: "HomeServer",
+        ratelimiter: FederationRateLimiter,
+        authenticator: Authenticator,
+        server_name: str,
+    ):
+        super().__init__(hs, authenticator, ratelimiter, server_name)
+        self.media_repo = self.hs.get_media_repository()
+        self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+        self.thumbnail_provider = ThumbnailProvider(
+            hs, self.media_repo, self.media_repo.media_storage
+        )
+
+    async def on_GET(
+        self,
+        origin: Optional[str],
+        content: Literal[None],
+        request: SynapseRequest,
+        media_id: str,
+    ) -> None:
+        width = parse_integer(request, "width", required=True)
+        height = parse_integer(request, "height", required=True)
+        method = parse_string(request, "method", "scale")
+        # TODO Parse the Accept header to get a prioritised list of thumbnail types.
+        m_type = "image/png"
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+        if self.dynamic_thumbnails:
+            await self.thumbnail_provider.select_or_generate_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        else:
+            await self.thumbnail_provider.respond_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        self.media_repo.mark_recently_accessed(None, media_id)
+
+
 FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationSendServlet,
     FederationEventServlet,
@@ -857,5 +911,4 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationV1SendKnockServlet,
     FederationMakeKnockServlet,
     FederationAccountStatusServlet,
-    FederationMediaDownloadServlet,
 )
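`FederationMediaThumbnailServlet` serves authenticated thumbnails over federation per MSC3916, only for local media. Roughly, a request has the following shape; the `/_matrix/federation/v1` prefix is assumed here, and the query parameters mirror what `on_GET` parses above:

```python
from urllib.parse import urlencode

media_id = "abcDEF123"  # hypothetical media ID
query = urlencode({"width": 96, "height": 96, "method": "scale", "timeout_ms": 20000})
url = f"https://matrix.example.com/_matrix/federation/v1/media/thumbnail/{media_id}?{query}"
```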
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index b2c8ba5887..d8b67a6a5b 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -19,7 +19,7 @@
 #
 #
 
-""" Defines the JSON structure of the protocol units used by the server to
+"""Defines the JSON structure of the protocol units used by the server to
 server protocol.
 """
 
diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py
index 89e944bc17..37cc3d3ff5 100644
--- a/synapse/handlers/account.py
+++ b/synapse/handlers/account.py
@@ -118,10 +118,10 @@ class AccountHandler:
             }
 
             if self._use_account_validity_in_account_status:
-                status["org.matrix.expired"] = (
-                    await self._account_validity_handler.is_user_expired(
-                        user_id.to_string()
-                    )
+                status[
+                    "org.matrix.expired"
+                ] = await self._account_validity_handler.is_user_expired(
+                    user_id.to_string()
                 )
 
         return status
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 97a463d8d0..228132db48 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -33,7 +33,7 @@ from synapse.replication.http.account_data import (
     ReplicationRemoveUserAccountDataRestServlet,
 )
 from synapse.streams import EventSource
-from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID
+from synapse.types import JsonDict, JsonMapping, StrCollection, StreamKeyType, UserID
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -253,7 +253,7 @@ class AccountDataHandler:
             return response["max_stream_id"]
 
     async def add_tag_to_room(
-        self, user_id: str, room_id: str, tag: str, content: JsonDict
+        self, user_id: str, room_id: str, tag: str, content: JsonMapping
     ) -> int:
         """Add a tag to a room for a user.
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index ec35784c5f..851fe57a17 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -21,13 +21,34 @@
 
 import abc
 import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+)
 
 import attr
 
-from synapse.api.constants import Direction, Membership
+from synapse.api.constants import Direction, EventTypes, Membership
+from synapse.api.errors import SynapseError
 from synapse.events import EventBase
-from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo
+from synapse.types import (
+    JsonMapping,
+    Requester,
+    RoomStreamToken,
+    ScheduledTask,
+    StateMap,
+    TaskStatus,
+    UserID,
+    UserInfo,
+    create_requester,
+)
 from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
@@ -35,6 +56,8 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+REDACT_ALL_EVENTS_ACTION_NAME = "redact_all_events"
+
 
 class AdminHandler:
     def __init__(self, hs: "HomeServer"):
@@ -43,6 +66,20 @@ class AdminHandler:
         self._storage_controllers = hs.get_storage_controllers()
         self._state_storage_controller = self._storage_controllers.state
         self._msc3866_enabled = hs.config.experimental.msc3866.enabled
+        self.event_creation_handler = hs.get_event_creation_handler()
+        self._task_scheduler = hs.get_task_scheduler()
+
+        self._task_scheduler.register_action(
+            self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME
+        )
+
+    async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]:
+        """Get the current status of an active redaction process
+
+        Args:
+            redact_id: redact_id returned by start_redact_events.
+        """
+        return await self._task_scheduler.get_task(redact_id)
 
     async def get_whois(self, user: UserID) -> JsonMapping:
         connections = []
@@ -197,8 +234,16 @@ class AdminHandler:
             # events that we have and then filtering, this isn't the most
             # efficient method perhaps but it does guarantee we get everything.
             while True:
-                events, _ = await self._store.paginate_room_events(
-                    room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS
+                (
+                    events,
+                    _,
+                    _,
+                ) = await self._store.paginate_room_events_by_topological_ordering(
+                    room_id=room_id,
+                    from_key=from_key,
+                    to_key=to_key,
+                    limit=100,
+                    direction=Direction.FORWARDS,
                 )
                 if not events:
                     break
@@ -305,6 +350,153 @@ class AdminHandler:
 
         return writer.finished()
 
+    async def start_redact_events(
+        self,
+        user_id: str,
+        rooms: list,
+        requester: JsonMapping,
+        reason: Optional[str],
+        limit: Optional[int],
+    ) -> str:
+        """
+        Start a task redacting the events of the given user in the given rooms
+
+        Args:
+            user_id: the user ID of the user whose events should be redacted
+            rooms: the rooms in which to redact the user's events
+            requester: the user requesting the redaction
+            reason: the reason for requesting the redaction, e.g. spam
+            limit: limit on the number of events in each room to redact
+
+        Returns:
+            a unique ID which can be used to query the status of the task
+        """
+        active_tasks = await self._task_scheduler.get_tasks(
+            actions=[REDACT_ALL_EVENTS_ACTION_NAME],
+            resource_id=user_id,
+            statuses=[TaskStatus.ACTIVE],
+        )
+
+        if len(active_tasks) > 0:
+            raise SynapseError(
+                400, "Redact already in progress for user %s" % (user_id,)
+            )
+
+        if not limit:
+            limit = 1000
+
+        redact_id = await self._task_scheduler.schedule_task(
+            REDACT_ALL_EVENTS_ACTION_NAME,
+            resource_id=user_id,
+            params={
+                "rooms": rooms,
+                "requester": requester,
+                "user_id": user_id,
+                "reason": reason,
+                "limit": limit,
+            },
+        )
+
+        logger.info(
+            "starting redact events with redact_id %s",
+            redact_id,
+        )
+
+        return redact_id
+
+    async def _redact_all_events(
+        self, task: ScheduledTask
+    ) -> Tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]:
+        """
+        Task to redact all of a user's events in the given rooms, tracking which
+        events, if any, failed to be redacted
+        """
+
+        assert task.params is not None
+        rooms = task.params.get("rooms")
+        assert rooms is not None
+
+        r = task.params.get("requester")
+        assert r is not None
+        admin = Requester.deserialize(self._store, r)
+
+        user_id = task.params.get("user_id")
+        assert user_id is not None
+
+        requester = create_requester(
+            user_id, authenticated_entity=admin.user.to_string()
+        )
+
+        reason = task.params.get("reason")
+        limit = task.params.get("limit")
+        assert limit is not None
+
+        result: Mapping[str, Any] = (
+            task.result if task.result else {"failed_redactions": {}}
+        )
+        for room in rooms:
+            room_version = await self._store.get_room_version(room)
+            event_ids = await self._store.get_events_sent_by_user_in_room(
+                user_id,
+                room,
+                limit,
+                ["m.room.member", "m.room.message"],
+            )
+            if not event_ids:
+                # nothing to redact in this room
+                continue
+
+            events = await self._store.get_events_as_list(event_ids)
+            for event in events:
+                # we care about join events but not other membership events
+                if event.type == "m.room.member":
+                    content = event.content
+                    if content:
+                        if content.get("membership") == Membership.JOIN:
+                            pass
+                        else:
+                            continue
+                relations = await self._store.get_relations_for_event(
+                    room, event.event_id, event, event_type=EventTypes.Redaction
+                )
+
+                # if we've already successfully redacted this event then skip processing it
+                if relations[0]:
+                    continue
+
+                event_dict = {
+                    "type": EventTypes.Redaction,
+                    "content": {"reason": reason} if reason else {},
+                    "room_id": room,
+                    "sender": user_id,
+                }
+                if room_version.updated_redaction_rules:
+                    event_dict["content"]["redacts"] = event.event_id
+                else:
+                    event_dict["redacts"] = event.event_id
+
+                try:
+                    # set the prev event to the offending message to allow for redactions
+                    # to be processed in the case where the user has been kicked/banned before
+                    # redactions are requested
+                    (
+                        redaction,
+                        _,
+                    ) = await self.event_creation_handler.create_and_send_nonmember_event(
+                        requester,
+                        event_dict,
+                        prev_event_ids=[event.event_id],
+                        ratelimit=False,
+                    )
+                except Exception as ex:
+                    logger.info(
+                        f"Redaction of event {event.event_id} failed due to: {ex}"
+                    )
+                    result["failed_redactions"][event.event_id] = str(ex)
+                    await self._task_scheduler.update_task(task.id, result=result)
+
+        return TaskStatus.COMPLETE, result, None
+
 
 class ExfiltrationWriter(metaclass=abc.ABCMeta):
     """Interface used to specify how to write exported data."""
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index a1fab99f6b..1f4264ad7e 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -166,8 +166,7 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]:
     if "country" not in identifier or (
         # The specification requires a "phone" field, while Synapse used to require a "number"
         # field. Accept both for backwards compatibility.
-        "phone" not in identifier
-        and "number" not in identifier
+        "phone" not in identifier and "number" not in identifier
     ):
         raise SynapseError(
             400, "Invalid phone-type identifier", errcode=Codes.INVALID_PARAM
diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py
new file mode 100644
index 0000000000..3c88a96fd3
--- /dev/null
+++ b/synapse/handlers/delayed_events.py
@@ -0,0 +1,498 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, List, Optional, Set, Tuple
+
+from twisted.internet.interfaces import IDelayedCall
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import ShadowBanError
+from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
+from synapse.logging.opentracing import set_tag
+from synapse.metrics import event_processing_positions
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.replication.http.delayed_events import (
+    ReplicationAddedDelayedEventRestServlet,
+)
+from synapse.storage.databases.main.delayed_events import (
+    DelayedEventDetails,
+    DelayID,
+    EventType,
+    StateKey,
+    Timestamp,
+    UserLocalpart,
+)
+from synapse.storage.databases.main.state_deltas import StateDelta
+from synapse.types import (
+    JsonDict,
+    Requester,
+    RoomID,
+    UserID,
+    create_requester,
+)
+from synapse.util.events import generate_fake_event_id
+from synapse.util.metrics import Measure
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class DelayedEventsHandler:
+    def __init__(self, hs: "HomeServer"):
+        self._store = hs.get_datastores().main
+        self._storage_controllers = hs.get_storage_controllers()
+        self._config = hs.config
+        self._clock = hs.get_clock()
+        self._request_ratelimiter = hs.get_request_ratelimiter()
+        self._event_creation_handler = hs.get_event_creation_handler()
+        self._room_member_handler = hs.get_room_member_handler()
+
+        self._next_delayed_event_call: Optional[IDelayedCall] = None
+
+        # The current position in the current_state_delta stream
+        self._event_pos: Optional[int] = None
+
+        # Guard to ensure we only process event deltas one at a time
+        self._event_processing = False
+
+        if hs.config.worker.worker_app is None:
+            self._repl_client = None
+
+            async def _schedule_db_events() -> None:
+                # We kick this off to pick up outstanding work from before the last restart.
+                # Block until we're up to date.
+                await self._unsafe_process_new_event()
+                hs.get_notifier().add_replication_callback(self.notify_new_event)
+                # Kick off again (without blocking) to catch any missed notifications
+                # that may have fired before the callback was added.
+                self._clock.call_later(0, self.notify_new_event)
+
+                # Delayed events that are already marked as processed on startup might not have been
+                # sent properly on the last run of the server, so unmark them to send them again.
+                # Caveat: this will double-send delayed events that successfully persisted, but failed
+                # to be removed from the DB table of delayed events.
+                # TODO: To avoid double-sending, scan the timeline to find which of these events were
+                # already sent. To do so, we must store delay_ids in sent events to retrieve them later.
+                await self._store.unprocess_delayed_events()
+
+                events, next_send_ts = await self._store.process_timeout_delayed_events(
+                    self._get_current_ts()
+                )
+
+                if next_send_ts:
+                    self._schedule_next_at(next_send_ts)
+
+                # Can send the events in background after having awaited on marking them as processed
+                run_as_background_process(
+                    "_send_events",
+                    self._send_events,
+                    events,
+                )
+
+            self._initialized_from_db = run_as_background_process(
+                "_schedule_db_events", _schedule_db_events
+            )
+        else:
+            self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)
+
+    @property
+    def _is_master(self) -> bool:
+        return self._repl_client is None
+
+    def notify_new_event(self) -> None:
+        """
+        Called when there may be more state event deltas to process,
+        which should cancel pending delayed events for the same state.
+        """
+        if self._event_processing:
+            return
+
+        self._event_processing = True
+
+        async def process() -> None:
+            try:
+                await self._unsafe_process_new_event()
+            finally:
+                self._event_processing = False
+
+        run_as_background_process("delayed_events.notify_new_event", process)
+
+    async def _unsafe_process_new_event(self) -> None:
+        # If self._event_pos is None then it means we haven't fetched it from the DB yet
+        if self._event_pos is None:
+            self._event_pos = await self._store.get_delayed_events_stream_pos()
+            room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+            if self._event_pos > room_max_stream_ordering:
+                # apparently, we've processed more events than exist in the database!
+                # this can happen if events are removed with history purge or similar.
+                logger.warning(
+                    "Event stream ordering appears to have gone backwards (%i -> %i): "
+                    "rewinding delayed events processor",
+                    self._event_pos,
+                    room_max_stream_ordering,
+                )
+                self._event_pos = room_max_stream_ordering
+
+        # Loop round handling deltas until we're up to date
+        while True:
+            with Measure(self._clock, "delayed_events_delta"):
+                room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+                if self._event_pos == room_max_stream_ordering:
+                    return
+
+                logger.debug(
+                    "Processing delayed events %s->%s",
+                    self._event_pos,
+                    room_max_stream_ordering,
+                )
+                (
+                    max_pos,
+                    deltas,
+                ) = await self._storage_controllers.state.get_current_state_deltas(
+                    self._event_pos, room_max_stream_ordering
+                )
+
+                logger.debug(
+                    "Handling %d state deltas for delayed events processing",
+                    len(deltas),
+                )
+                await self._handle_state_deltas(deltas)
+
+                self._event_pos = max_pos
+
+                # Expose current event processing position to prometheus
+                event_processing_positions.labels("delayed_events").set(max_pos)
+
+                await self._store.update_delayed_events_stream_pos(max_pos)
+
+    async def _handle_state_deltas(self, deltas: List[StateDelta]) -> None:
+        """
+        Process current state deltas to cancel pending delayed events
+        that target the same state.
+        """
+        for delta in deltas:
+            logger.debug(
+                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            )
+
+            next_send_ts = await self._store.cancel_delayed_state_events(
+                room_id=delta.room_id,
+                event_type=delta.event_type,
+                state_key=delta.state_key,
+            )
+
+            if self._next_send_ts_changed(next_send_ts):
+                self._schedule_next_at_or_none(next_send_ts)
+
+    async def add(
+        self,
+        requester: Requester,
+        *,
+        room_id: str,
+        event_type: str,
+        state_key: Optional[str],
+        origin_server_ts: Optional[int],
+        content: JsonDict,
+        delay: int,
+    ) -> str:
+        """
+        Creates a new delayed event and schedules its delivery.
+
+        Args:
+            requester: The requester of the delayed event, who will be its owner.
+            room_id: The ID of the room where the event should be sent to.
+            event_type: The type of event to be sent.
+            state_key: The state key of the event to be sent, or None if it is not a state event.
+            origin_server_ts: The custom timestamp to send the event with.
+                If None, the timestamp will be the actual time when the event is sent.
+            content: The content of the event to be sent.
+            delay: How long (in milliseconds) to wait before automatically sending the event.
+
+        Returns: The ID of the added delayed event.
+
+        Raises:
+            SynapseError: if the delayed event fails validation checks.
+        """
+        await self._request_ratelimiter.ratelimit(requester)
+
+        self._event_creation_handler.validator.validate_builder(
+            self._event_creation_handler.event_builder_factory.for_room_version(
+                await self._store.get_room_version(room_id),
+                {
+                    "type": event_type,
+                    "content": content,
+                    "room_id": room_id,
+                    "sender": str(requester.user),
+                    **({"state_key": state_key} if state_key is not None else {}),
+                },
+            )
+        )
+
+        creation_ts = self._get_current_ts()
+
+        delay_id, next_send_ts = await self._store.add_delayed_event(
+            user_localpart=requester.user.localpart,
+            device_id=requester.device_id,
+            creation_ts=creation_ts,
+            room_id=room_id,
+            event_type=event_type,
+            state_key=state_key,
+            origin_server_ts=origin_server_ts,
+            content=content,
+            delay=delay,
+        )
+
+        if self._repl_client is not None:
+            # NOTE: If this throws, the delayed event will remain in the DB and
+            # will be picked up once the main worker gets another delayed event.
+            await self._repl_client(
+                instance_name=MAIN_PROCESS_INSTANCE_NAME,
+                next_send_ts=next_send_ts,
+            )
+        elif self._next_send_ts_changed(next_send_ts):
+            self._schedule_next_at(next_send_ts)
+
+        return delay_id
+
+    def on_added(self, next_send_ts: int) -> None:
+        next_send_ts = Timestamp(next_send_ts)
+        if self._next_send_ts_changed(next_send_ts):
+            self._schedule_next_at(next_send_ts)
+
+    async def cancel(self, requester: Requester, delay_id: str) -> None:
+        """
+        Cancels the scheduled delivery of the matching delayed event.
+
+        Args:
+            requester: The owner of the delayed event to act on.
+            delay_id: The ID of the delayed event to act on.
+
+        Raises:
+            NotFoundError: if no matching delayed event could be found.
+        """
+        assert self._is_master
+        await self._request_ratelimiter.ratelimit(requester)
+        await self._initialized_from_db
+
+        next_send_ts = await self._store.cancel_delayed_event(
+            delay_id=delay_id,
+            user_localpart=requester.user.localpart,
+        )
+
+        if self._next_send_ts_changed(next_send_ts):
+            self._schedule_next_at_or_none(next_send_ts)
+
+    async def restart(self, requester: Requester, delay_id: str) -> None:
+        """
+        Restarts the scheduled delivery of the matching delayed event.
+
+        Args:
+            requester: The owner of the delayed event to act on.
+            delay_id: The ID of the delayed event to act on.
+
+        Raises:
+            NotFoundError: if no matching delayed event could be found.
+        """
+        assert self._is_master
+        await self._request_ratelimiter.ratelimit(requester)
+        await self._initialized_from_db
+
+        next_send_ts = await self._store.restart_delayed_event(
+            delay_id=delay_id,
+            user_localpart=requester.user.localpart,
+            current_ts=self._get_current_ts(),
+        )
+
+        if self._next_send_ts_changed(next_send_ts):
+            self._schedule_next_at(next_send_ts)
+
+    async def send(self, requester: Requester, delay_id: str) -> None:
+        """
+        Immediately sends the matching delayed event, instead of waiting for its scheduled delivery.
+
+        Args:
+            requester: The owner of the delayed event to act on.
+            delay_id: The ID of the delayed event to act on.
+
+        Raises:
+            NotFoundError: if no matching delayed event could be found.
+        """
+        assert self._is_master
+        await self._request_ratelimiter.ratelimit(requester)
+        await self._initialized_from_db
+
+        event, next_send_ts = await self._store.process_target_delayed_event(
+            delay_id=delay_id,
+            user_localpart=requester.user.localpart,
+        )
+
+        if self._next_send_ts_changed(next_send_ts):
+            self._schedule_next_at_or_none(next_send_ts)
+
+        await self._send_event(
+            DelayedEventDetails(
+                delay_id=DelayID(delay_id),
+                user_localpart=UserLocalpart(requester.user.localpart),
+                room_id=event.room_id,
+                type=event.type,
+                state_key=event.state_key,
+                origin_server_ts=event.origin_server_ts,
+                content=event.content,
+                device_id=event.device_id,
+            )
+        )
+
+    async def _send_on_timeout(self) -> None:
+        self._next_delayed_event_call = None
+
+        events, next_send_ts = await self._store.process_timeout_delayed_events(
+            self._get_current_ts()
+        )
+
+        if next_send_ts:
+            self._schedule_next_at(next_send_ts)
+
+        await self._send_events(events)
+
+    async def _send_events(self, events: List[DelayedEventDetails]) -> None:
+        sent_state: Set[Tuple[RoomID, EventType, StateKey]] = set()
+        for event in events:
+            if event.state_key is not None:
+                state_info = (event.room_id, event.type, event.state_key)
+                if state_info in sent_state:
+                    continue
+            else:
+                state_info = None
+            try:
+                # TODO: send in background if message event or non-conflicting state event
+                await self._send_event(event)
+                if state_info is not None:
+                    sent_state.add(state_info)
+            except Exception:
+                logger.exception("Failed to send delayed event")
+
+        for room_id, event_type, state_key in sent_state:
+            await self._store.delete_processed_delayed_state_events(
+                room_id=str(room_id),
+                event_type=event_type,
+                state_key=state_key,
+            )
+
+    def _schedule_next_at_or_none(self, next_send_ts: Optional[Timestamp]) -> None:
+        if next_send_ts is not None:
+            self._schedule_next_at(next_send_ts)
+        elif self._next_delayed_event_call is not None:
+            self._next_delayed_event_call.cancel()
+            self._next_delayed_event_call = None
+
+    def _schedule_next_at(self, next_send_ts: Timestamp) -> None:
+        delay = next_send_ts - self._get_current_ts()
+        delay_sec = delay / 1000 if delay > 0 else 0
+
+        if self._next_delayed_event_call is None:
+            self._next_delayed_event_call = self._clock.call_later(
+                delay_sec,
+                run_as_background_process,
+                "_send_on_timeout",
+                self._send_on_timeout,
+            )
+        else:
+            self._next_delayed_event_call.reset(delay_sec)
+
+    async def get_all_for_user(self, requester: Requester) -> List[JsonDict]:
+        """Return all pending delayed events requested by the given user."""
+        await self._request_ratelimiter.ratelimit(requester)
+        return await self._store.get_all_delayed_events_for_user(
+            requester.user.localpart
+        )
+
+    async def _send_event(
+        self,
+        event: DelayedEventDetails,
+        txn_id: Optional[str] = None,
+    ) -> None:
+        user_id = UserID(event.user_localpart, self._config.server.server_name)
+        user_id_str = user_id.to_string()
+        # Create a new requester from what data is currently available
+        requester = create_requester(
+            user_id,
+            is_guest=await self._store.is_guest(user_id_str),
+            device_id=event.device_id,
+        )
+
+        try:
+            if event.state_key is not None and event.type == EventTypes.Member:
+                membership = event.content.get("membership")
+                assert membership is not None
+                event_id, _ = await self._room_member_handler.update_membership(
+                    requester,
+                    target=UserID.from_string(event.state_key),
+                    room_id=event.room_id.to_string(),
+                    action=membership,
+                    content=event.content,
+                    origin_server_ts=event.origin_server_ts,
+                )
+            else:
+                event_dict: JsonDict = {
+                    "type": event.type,
+                    "content": event.content,
+                    "room_id": event.room_id.to_string(),
+                    "sender": user_id_str,
+                }
+
+                if event.origin_server_ts is not None:
+                    event_dict["origin_server_ts"] = event.origin_server_ts
+
+                if event.state_key is not None:
+                    event_dict["state_key"] = event.state_key
+
+                (
+                    sent_event,
+                    _,
+                ) = await self._event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    event_dict,
+                    txn_id=txn_id,
+                )
+                event_id = sent_event.event_id
+        except ShadowBanError:
+            event_id = generate_fake_event_id()
+        finally:
+            # TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure
+            try:
+                await self._store.delete_processed_delayed_event(
+                    event.delay_id, event.user_localpart
+                )
+            except Exception:
+                logger.exception("Failed to delete processed delayed event")
+
+        set_tag("event_id", event_id)
+
+    def _get_current_ts(self) -> Timestamp:
+        return Timestamp(self._clock.time_msec())
+
+    def _next_send_ts_changed(self, next_send_ts: Optional[Timestamp]) -> bool:
+        # The DB alone knows if the next send time changed after adding/modifying
+        # a delayed event, but if we were to ever miss updating our delayed call's
+        # firing time, we may miss other updates. So, keep track of changes to
+        # the next send time here instead of in the DB.
+        cached_next_send_ts = (
+            int(self._next_delayed_event_call.getTime() * 1000)
+            if self._next_delayed_event_call is not None
+            else None
+        )
+        return next_send_ts != cached_next_send_ts
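`DelayedEventsHandler.add` validates the event via the event builder, persists it, and then either schedules the next local timer (on the main process) or notifies the main process over replication. A hedged sketch of calling it for a delayed state event (placeholder room, content, and handler/requester objects):

```python
from typing import Any


async def schedule_lights_off(handler: Any, requester: Any) -> str:
    # Keyword-only arguments mirror DelayedEventsHandler.add above.
    return await handler.add(
        requester,
        room_id="!room:example.com",
        event_type="m.room.topic",
        state_key="",
        origin_server_ts=None,  # stamp with the actual send time
        content={"topic": "Lights out"},
        delay=15 * 60 * 1000,  # 15 minutes, in milliseconds
    )
```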
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 0432d97109..d88660e273 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -20,10 +20,20 @@
 #
 #
 import logging
-from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple
+from typing import (
+    TYPE_CHECKING,
+    AbstractSet,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+)
 
 from synapse.api import errors
-from synapse.api.constants import EduTypes, EventTypes
+from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import (
     Codes,
     FederationDeniedError,
@@ -38,7 +48,10 @@ from synapse.metrics.background_process_metrics import (
     wrap_as_background_process,
 )
 from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
+from synapse.storage.databases.main.roommember import EventIdMembership
+from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.types import (
+    DeviceListUpdates,
     JsonDict,
     JsonMapping,
     ScheduledTask,
@@ -210,142 +223,214 @@ class DeviceWorkerHandler:
         return changed
 
     @trace
-    @measure_func("device.get_user_ids_changed")
     @cancellable
     async def get_user_ids_changed(
         self, user_id: str, from_token: StreamToken
-    ) -> JsonDict:
+    ) -> DeviceListUpdates:
         """Get list of users that have had the devices updated, or have newly
         joined a room, that `user_id` may be interested in.
         """
 
         set_tag("user_id", user_id)
         set_tag("from_token", str(from_token))
-        now_room_key = self.store.get_room_max_token()
 
-        room_ids = await self.store.get_rooms_for_user(user_id)
+        now_token = self._event_sources.get_current_token()
 
-        changed = await self.get_device_changes_in_shared_rooms(
-            user_id, room_ids, from_token
+        # We need to work out all the different membership changes for the user
+        # and the users they share a room with, to pass to
+        # `generate_sync_entry_for_device_list`. See its docstring for details
+        # on the data required.
+
+        joined_room_ids = await self.store.get_rooms_for_user(user_id)
+
+        # Get the set of rooms that the user has joined/left
+        membership_changes = (
+            await self.store.get_current_state_delta_membership_changes_for_user(
+                user_id, from_key=from_token.room_key, to_key=now_token.room_key
+            )
         )
 
-        # Then work out if any users have since joined
-        rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
+        # Check for newly joined or left rooms. We need to make sure that we add
+        # to newly joined in the case membership goes from join -> leave -> join
+        # again.
+        newly_joined_rooms: Set[str] = set()
+        newly_left_rooms: Set[str] = set()
+        for change in membership_changes:
+            # We check for changes in "joinedness", i.e. if the membership has
+            # changed to or from JOIN.
+            if change.membership == Membership.JOIN:
+                if change.prev_membership != Membership.JOIN:
+                    newly_joined_rooms.add(change.room_id)
+                    newly_left_rooms.discard(change.room_id)
+            elif change.prev_membership == Membership.JOIN:
+                newly_joined_rooms.discard(change.room_id)
+                newly_left_rooms.add(change.room_id)
 
-        member_events = await self.store.get_membership_changes_for_user(
-            user_id, from_token.room_key, now_room_key
+        # We now work out if any other users have since joined or left the rooms
+        # the user is currently in.
+
+        # List of membership changes per room
+        room_to_deltas: Dict[str, List[StateDelta]] = {}
+        # The set of event IDs of membership events (so we can fetch their
+        # associated membership).
+        memberships_to_fetch: Set[str] = set()
+
+        # TODO: Only pull out membership events?
+        state_changes = await self.store.get_current_state_deltas_for_rooms(
+            joined_room_ids, from_token=from_token.room_key, to_token=now_token.room_key
         )
-        rooms_changed.update(event.room_id for event in member_events)
+        for delta in state_changes:
+            if delta.event_type != EventTypes.Member:
+                continue
 
-        stream_ordering = from_token.room_key.stream
+            room_to_deltas.setdefault(delta.room_id, []).append(delta)
+            if delta.event_id:
+                memberships_to_fetch.add(delta.event_id)
+            if delta.prev_event_id:
+                memberships_to_fetch.add(delta.prev_event_id)
 
-        possibly_changed = set(changed)
-        possibly_left = set()
-        for room_id in rooms_changed:
-            # Check if the forward extremities have changed. If not then we know
-            # the current state won't have changed, and so we can skip this room.
-            try:
-                if not await self.store.have_room_forward_extremities_changed_since(
-                    room_id, stream_ordering
-                ):
-                    continue
-            except errors.StoreError:
-                pass
-
-            current_state_ids = await self._state_storage.get_current_state_ids(
-                room_id, await_full_state=False
+        # Fetch all the memberships for the membership events
+        event_id_to_memberships: Mapping[str, Optional[EventIdMembership]] = {}
+        if memberships_to_fetch:
+            event_id_to_memberships = await self.store.get_membership_from_event_ids(
+                memberships_to_fetch
             )
 
-            # The user may have left the room
-            # TODO: Check if they actually did or if we were just invited.
-            if room_id not in room_ids:
-                for etype, state_key in current_state_ids.keys():
-                    if etype != EventTypes.Member:
-                        continue
-                    possibly_left.add(state_key)
-                continue
+        joined_invited_knocked = (
+            Membership.JOIN,
+            Membership.INVITE,
+            Membership.KNOCK,
+        )
 
-            # Fetch the current state at the time.
-            try:
-                event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(
-                    room_id, stream_ordering=stream_ordering
-                )
-            except errors.StoreError:
-                # we have purged the stream_ordering index since the stream
-                # ordering: treat it the same as a new room
-                event_ids = []
+        # We now want to find any users that have newly joined/been invited/knocked,
+        # or newly left, similarly to above.
+        newly_joined_or_invited_or_knocked_users: Set[str] = set()
+        newly_left_users: Set[str] = set()
+        for _, deltas in room_to_deltas.items():
+            for delta in deltas:
+                # Get the prev/new memberships for the delta
+                new_membership = None
+                prev_membership = None
+                if delta.event_id:
+                    m = event_id_to_memberships.get(delta.event_id)
+                    if m is not None:
+                        new_membership = m.membership
+                if delta.prev_event_id:
+                    m = event_id_to_memberships.get(delta.prev_event_id)
+                    if m is not None:
+                        prev_membership = m.membership
 
-            # special-case for an empty prev state: include all members
-            # in the changed list
-            if not event_ids:
-                log_kv(
-                    {"event": "encountered empty previous state", "room_id": room_id}
-                )
-                for etype, state_key in current_state_ids.keys():
-                    if etype != EventTypes.Member:
-                        continue
-                    possibly_changed.add(state_key)
-                continue
+                # Check if a user has newly joined/been invited/knocked, or left.
+                if new_membership in joined_invited_knocked:
+                    if prev_membership not in joined_invited_knocked:
+                        newly_joined_or_invited_or_knocked_users.add(delta.state_key)
+                        newly_left_users.discard(delta.state_key)
+                elif prev_membership in joined_invited_knocked:
+                    newly_joined_or_invited_or_knocked_users.discard(delta.state_key)
+                    newly_left_users.add(delta.state_key)
 
-            current_member_id = current_state_ids.get((EventTypes.Member, user_id))
-            if not current_member_id:
-                continue
+        # Now we actually build the device list entry from the information
+        # calculated above.
+        device_list_updates = await self.generate_sync_entry_for_device_list(
+            user_id=user_id,
+            since_token=from_token,
+            now_token=now_token,
+            joined_room_ids=joined_room_ids,
+            newly_joined_rooms=newly_joined_rooms,
+            newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
+            newly_left_rooms=newly_left_rooms,
+            newly_left_users=newly_left_users,
+        )
 
-            # mapping from event_id -> state_dict
-            prev_state_ids = await self._state_storage.get_state_ids_for_events(
-                event_ids,
-                await_full_state=False,
-            )
+        log_kv(
+            {
+                "changed": device_list_updates.changed,
+                "left": device_list_updates.left,
+            }
+        )
 
-            # Check if we've joined the room? If so we just blindly add all the users to
-            # the "possibly changed" users.
-            for state_dict in prev_state_ids.values():
-                member_event = state_dict.get((EventTypes.Member, user_id), None)
-                if not member_event or member_event != current_member_id:
-                    for etype, state_key in current_state_ids.keys():
-                        if etype != EventTypes.Member:
-                            continue
-                        possibly_changed.add(state_key)
-                    break
+        return device_list_updates
 
-            # If there has been any change in membership, include them in the
-            # possibly changed list. We'll check if they are joined below,
-            # and we're not toooo worried about spuriously adding users.
-            for key, event_id in current_state_ids.items():
-                etype, state_key = key
-                if etype != EventTypes.Member:
-                    continue
+    async def generate_sync_entry_for_device_list(
+        self,
+        user_id: str,
+        since_token: StreamToken,
+        now_token: StreamToken,
+        joined_room_ids: AbstractSet[str],
+        newly_joined_rooms: AbstractSet[str],
+        newly_joined_or_invited_or_knocked_users: AbstractSet[str],
+        newly_left_rooms: AbstractSet[str],
+        newly_left_users: AbstractSet[str],
+    ) -> DeviceListUpdates:
+        """Generate the DeviceListUpdates section of sync
 
-                # check if this member has changed since any of the extremities
-                # at the stream_ordering, and add them to the list if so.
-                for state_dict in prev_state_ids.values():
-                    prev_event_id = state_dict.get(key, None)
-                    if not prev_event_id or prev_event_id != event_id:
-                        if state_key != user_id:
-                            possibly_changed.add(state_key)
-                        break
+        Args:
+            user_id: The user to calculate device list updates for
+            since_token: The token from the previous sync
+            now_token: The token to sync up to
+            joined_room_ids: Set of rooms the user is currently joined to
+            newly_joined_rooms: Set of rooms user has joined since previous sync
+            newly_joined_or_invited_or_knocked_users: Set of users that have joined,
+                been invited to a room or are knocking on a room since
+                previous sync.
+            newly_left_rooms: Set of rooms user has left since previous sync
+            newly_left_users: Set of users that have left a room we're in since
+                previous sync
+        """
+        # Take a copy since these fields will be mutated later.
+        newly_joined_or_invited_or_knocked_users = set(
+            newly_joined_or_invited_or_knocked_users
+        )
+        newly_left_users = set(newly_left_users)
 
-        if possibly_changed or possibly_left:
-            possibly_joined = possibly_changed
-            possibly_left = possibly_changed | possibly_left
+        # We want to figure out what user IDs the client should refetch
+        # device keys for, and which users we aren't going to track changes
+        # for anymore.
+        #
+        # For the first step we check:
+        #   a. if any users we share a room with have updated their devices,
+        #      and
+        #   b. we also check if we've joined any new rooms, or if a user has
+        #      joined a room we're in.
+        #
+        # For the second step we just find any users we no longer share a
+        # room with by looking at all users that have left a room plus users
+        # that were in a room we've left.
 
-            # Double check if we still share rooms with the given user.
-            users_rooms = await self.store.get_rooms_for_users(possibly_left)
-            for changed_user_id, entries in users_rooms.items():
-                if any(rid in room_ids for rid in entries):
-                    possibly_left.discard(changed_user_id)
-                else:
-                    possibly_joined.discard(changed_user_id)
+        users_that_have_changed = set()
 
-        else:
-            possibly_joined = set()
-            possibly_left = set()
+        # Step 1a, check for changes in devices of users we share a room
+        # with
+        users_that_have_changed = await self.get_device_changes_in_shared_rooms(
+            user_id,
+            joined_room_ids,
+            from_token=since_token,
+            now_token=now_token,
+        )
 
-        result = {"changed": list(possibly_joined), "left": list(possibly_left)}
+        # Step 1b, check for newly joined rooms
+        for room_id in newly_joined_rooms:
+            joined_users = await self.store.get_users_in_room(room_id)
+            newly_joined_or_invited_or_knocked_users.update(joined_users)
 
-        log_kv(result)
+        # TODO: Check that these users are actually new, i.e. either they
+        # weren't in the previous sync *or* they left and rejoined.
+        users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)
 
-        return result
+        user_signatures_changed = await self.store.get_users_whose_signatures_changed(
+            user_id, since_token.device_list_key
+        )
+        users_that_have_changed.update(user_signatures_changed)
+
+        # Now find users that we no longer track
+        for room_id in newly_left_rooms:
+            left_users = await self.store.get_users_in_room(room_id)
+            newly_left_users.update(left_users)
+
+        # Remove any users that we still share a room with.
+        left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
+        for user_id, entries in left_users_rooms.items():
+            if any(rid in joined_room_ids for rid in entries):
+                newly_left_users.discard(user_id)
+
+        return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
 
     async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
         if not self.hs.is_mine(UserID.from_string(user_id)):
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index ad2b0f5fcc..62ce16794f 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -265,9 +265,9 @@ class DirectoryHandler:
     async def get_association(self, room_alias: RoomAlias) -> JsonDict:
         room_id = None
         if self.hs.is_mine(room_alias):
-            result: Optional[RoomAliasMapping] = (
-                await self.get_association_from_room_alias(room_alias)
-            )
+            result: Optional[
+                RoomAliasMapping
+            ] = await self.get_association_from_room_alias(room_alias)
 
             if result:
                 room_id = result.room_id
@@ -512,11 +512,9 @@ class DirectoryHandler:
                 raise SynapseError(403, "Not allowed to publish room")
 
             # Check if publishing is blocked by a third party module
-            allowed_by_third_party_rules = (
-                await (
-                    self._third_party_event_rules.check_visibility_can_be_modified(
-                        room_id, visibility
-                    )
+            allowed_by_third_party_rules = await (
+                self._third_party_event_rules.check_visibility_can_be_modified(
+                    room_id, visibility
                 )
             )
             if not allowed_by_third_party_rules:
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 668cec513b..f78e66ad0a 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -291,13 +291,20 @@ class E2eKeysHandler:
 
             # Only try and fetch keys for destinations that are not marked as
             # down.
-            filtered_destinations = await filter_destinations_by_retry_limiter(
-                remote_queries_not_in_cache.keys(),
-                self.clock,
-                self.store,
-                # Let's give an arbitrary grace period for those hosts that are
-                # only recently down
-                retry_due_within_ms=60 * 1000,
+            unfiltered_destinations = remote_queries_not_in_cache.keys()
+            filtered_destinations = set(
+                await filter_destinations_by_retry_limiter(
+                    unfiltered_destinations,
+                    self.clock,
+                    self.store,
+                    # Let's give an arbitrary grace period for those hosts that are
+                    # only recently down
+                    retry_due_within_ms=60 * 1000,
+                )
+            )
+            failures.update(
+                (dest, _NOT_READY_FOR_RETRY_FAILURE)
+                for dest in (unfiltered_destinations - filtered_destinations)
             )
 
             await concurrently_execute(
@@ -1641,6 +1648,9 @@ def _check_device_signature(
         raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
 
 
+_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"}
+
+
 def _exception_to_failure(e: Exception) -> JsonDict:
     if isinstance(e, SynapseError):
         return {"status": e.code, "errcode": e.errcode, "message": str(e)}
@@ -1649,7 +1659,7 @@ def _exception_to_failure(e: Exception) -> JsonDict:
         return {"status": e.code, "message": str(e)}
 
     if isinstance(e, NotRetryingDestination):
-        return {"status": 503, "message": "Not ready for retry"}
+        return _NOT_READY_FOR_RETRY_FAILURE
 
     # include ConnectionRefused and other errors
     #
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 99f9f6e64a..f397911f28 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -34,7 +34,7 @@ from synapse.api.errors import (
 from synapse.logging.opentracing import log_kv, trace
 from synapse.storage.databases.main.e2e_room_keys import RoomKey
 from synapse.types import JsonDict
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import ReadWriteLock
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
         # clients belonging to a user will receive and try to upload a new session at
         # roughly the same time.  Also used to lock out uploads when the key is being
         # changed.
-        self._upload_linearizer = Linearizer("upload_room_keys_lock")
+        self._upload_lock = ReadWriteLock()
 
     @trace
     async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:
 
         # we deliberately take the lock to get keys so that changing the version
         # works atomically
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.read(user_id):
             # make sure the backup version exists
             try:
                 await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
         """
 
         # lock for consistency with uploading
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
             # make sure the backup version exists
             try:
                 version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
         # TODO: Validate the JSON to make sure it has the right keys.
 
         # XXX: perhaps we should use a finer grained lock here?
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
             # Check that the version we're trying to upload is the current version
             try:
                 version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -355,7 +355,7 @@ class E2eRoomKeysHandler:
         # TODO: Validate the JSON to make sure it has the right keys.
 
         # lock everyone out until we've switched version
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
             new_version = await self.store.create_e2e_room_keys_version(
                 user_id, version_info
             )
@@ -382,7 +382,7 @@ class E2eRoomKeysHandler:
         }
         """
 
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.read(user_id):
             try:
                 res = await self.store.get_e2e_room_keys_version_info(user_id, version)
             except StoreError as e:
@@ -407,7 +407,7 @@ class E2eRoomKeysHandler:
             NotFoundError: if this backup version doesn't exist
         """
 
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
             try:
                 await self.store.delete_e2e_room_keys_version(user_id, version)
             except StoreError as e:
@@ -437,7 +437,7 @@ class E2eRoomKeysHandler:
             raise SynapseError(
                 400, "Version in body does not match", Codes.INVALID_PARAM
             )
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
             try:
                 old_info = await self.store.get_e2e_room_keys_version_info(
                     user_id, version
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 299588e476..2b7aad5b58 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1001,11 +1001,11 @@ class FederationHandler:
                     )
 
                 if include_auth_user_id:
-                    event_content[EventContentFields.AUTHORISING_USER] = (
-                        await self._event_auth_handler.get_user_which_could_invite(
-                            room_id,
-                            state_ids,
-                        )
+                    event_content[
+                        EventContentFields.AUTHORISING_USER
+                    ] = await self._event_auth_handler.get_user_which_could_invite(
+                        room_id,
+                        state_ids,
                     )
 
         builder = self.event_builder_factory.for_room_version(
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index cb31d65aa9..89191217d6 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -21,6 +21,7 @@
 #
 
 """Utilities for interacting with Identity Servers"""
+
 import logging
 import urllib.parse
 from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple
diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py
index 5fa7a305ad..400f3a59aa 100644
--- a/synapse/handlers/jwt.py
+++ b/synapse/handlers/jwt.py
@@ -18,7 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional, Tuple
 
 from authlib.jose import JsonWebToken, JWTClaims
 from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError
@@ -36,11 +36,12 @@ class JwtHandler:
 
         self.jwt_secret = hs.config.jwt.jwt_secret
         self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim
+        self.jwt_display_name_claim = hs.config.jwt.jwt_display_name_claim
         self.jwt_algorithm = hs.config.jwt.jwt_algorithm
         self.jwt_issuer = hs.config.jwt.jwt_issuer
         self.jwt_audiences = hs.config.jwt.jwt_audiences
 
-    def validate_login(self, login_submission: JsonDict) -> str:
+    def validate_login(self, login_submission: JsonDict) -> Tuple[str, Optional[str]]:
         """
         Authenticates the user for the /login API
 
@@ -49,7 +50,8 @@ class JwtHandler:
                 (including 'type' and other relevant fields)
 
         Returns:
-            The user ID that is logging in.
+            A tuple of (user_id, display_name) of the user that is logging in.
+            If the JWT does not contain a display name, the second element of the tuple will be None.
 
         Raises:
             LoginError if there was an authentication problem.
@@ -109,4 +111,10 @@ class JwtHandler:
         if user is None:
             raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN)
 
-        return UserID(user, self.hs.hostname).to_string()
+        default_display_name = None
+        if self.jwt_display_name_claim:
+            display_name_claim = claims.get(self.jwt_display_name_claim)
+            if display_name_claim is not None:
+                default_display_name = display_name_claim
+
+        return UserID(user, self.hs.hostname).to_string(), default_display_name
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 5aa48230ec..204965afee 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1225,10 +1225,9 @@ class EventCreationHandler:
             )
 
         if prev_event_ids is not None:
-            assert (
-                len(prev_event_ids) <= 10
-            ), "Attempting to create an event with %i prev_events" % (
-                len(prev_event_ids),
+            assert len(prev_event_ids) <= 10, (
+                "Attempting to create an event with %i prev_events"
+                % (len(prev_event_ids),)
             )
         else:
             prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 872c85fbad..4070b74b7a 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -507,7 +507,11 @@ class PaginationHandler:
 
         # Initially fetch the events from the database. With any luck, we can return
         # these without blocking on backfill (handled below).
-        events, next_key = await self.store.paginate_room_events(
+        (
+            events,
+            next_key,
+            _,
+        ) = await self.store.paginate_room_events_by_topological_ordering(
             room_id=room_id,
             from_key=from_token.room_key,
             to_key=to_room_key,
@@ -582,7 +586,11 @@ class PaginationHandler:
                 # If we did backfill something, refetch the events from the database to
                 # catch anything new that might have been added since we last fetched.
                 if did_backfill:
-                    events, next_key = await self.store.paginate_room_events(
+                    (
+                        events,
+                        next_key,
+                        _,
+                    ) = await self.store.paginate_room_events_by_topological_ordering(
                         room_id=room_id,
                         from_key=from_token.room_key,
                         to_key=to_room_key,
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 37ee625f71..390cafa8f6 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -71,6 +71,7 @@ user state; this device follows the normal timeout logic (see above) and will
 automatically be replaced with any information from currently available devices.
 
 """
+
 import abc
 import contextlib
 import itertools
@@ -493,9 +494,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
 
         # The number of ongoing syncs on this process, by (user ID, device ID).
         # Empty if _presence_enabled is false.
-        self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
-            {}
-        )
+        self._user_device_to_num_current_syncs: Dict[
+            Tuple[str, Optional[str]], int
+        ] = {}
 
         self.notifier = hs.get_notifier()
         self.instance_id = hs.get_instance_id()
@@ -818,9 +819,9 @@ class PresenceHandler(BasePresenceHandler):
 
         # Keeps track of the number of *ongoing* syncs on this process. While
         # this is non zero a user will never go offline.
-        self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
-            {}
-        )
+        self._user_device_to_num_current_syncs: Dict[
+            Tuple[str, Optional[str]], int
+        ] = {}
 
         # Keeps track of the number of *ongoing* syncs on other processes.
         #
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6663d4b271..ac4544ca4c 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -74,6 +74,17 @@ class ProfileHandler:
         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
 
     async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
+        """
+        Get a user's profile as a JSON dictionary.
+
+        Args:
+            user_id: The user to fetch the profile of.
+            ignore_backoff: True to ignore backoff when fetching over federation.
+
+        Returns:
+            A JSON dictionary. For local queries this will include the displayname and avatar_url
+            fields. For remote queries it may contain arbitrary information.
+        """
         target_user = UserID.from_string(user_id)
 
         if self.hs.is_mine(target_user):
@@ -107,6 +118,15 @@ class ProfileHandler:
                 raise e.to_synapse_error()
 
     async def get_displayname(self, target_user: UserID) -> Optional[str]:
+        """
+        Fetch a user's display name from their profile.
+
+        Args:
+            target_user: The user to fetch the display name of.
+
+        Returns:
+            The user's display name or None if unset.
+        """
         if self.hs.is_mine(target_user):
             try:
                 displayname = await self.store.get_profile_displayname(target_user)
@@ -203,6 +223,15 @@ class ProfileHandler:
             await self._update_join_states(requester, target_user)
 
     async def get_avatar_url(self, target_user: UserID) -> Optional[str]:
+        """
+        Fetch a user's avatar URL from their profile.
+
+        Args:
+            target_user: The user to fetch the avatar URL of.
+
+        Returns:
+            The user's avatar URL or None if unset.
+        """
         if self.hs.is_mine(target_user):
             try:
                 avatar_url = await self.store.get_profile_avatar_url(target_user)
@@ -322,9 +351,9 @@ class ProfileHandler:
             server_name = host
 
         if self._is_mine_server_name(server_name):
-            media_info: Optional[Union[LocalMedia, RemoteMedia]] = (
-                await self.store.get_local_media(media_id)
-            )
+            media_info: Optional[
+                Union[LocalMedia, RemoteMedia]
+            ] = await self.store.get_local_media(media_id)
         else:
             media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
@@ -403,6 +432,12 @@ class ProfileHandler:
     async def _update_join_states(
         self, requester: Requester, target_user: UserID
     ) -> None:
+        """
+        Update the membership events of each room the user is joined to with the
+        new profile information.
+
+        Note that this stomps over any custom display name or avatar URL in member events.
+        """
         if not self.hs.is_mine(target_user):
             return
 
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 8674a8fcdd..c776654d12 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -286,8 +286,14 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
         room_ids: Iterable[str],
         is_guest: bool,
         explicit_room_id: Optional[str] = None,
+        to_key: Optional[MultiWriterStreamToken] = None,
     ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
-        to_key = self.get_current_key()
+        """
+        Find read receipts for the given rooms (> `from_key` and <= `to_key`)
+        """
+
+        if to_key is None:
+            to_key = self.get_current_key()
 
         if from_key == to_key:
             return [], to_key
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index efe31e81f9..b1158ee77d 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -188,13 +188,13 @@ class RelationsHandler:
         if include_original_event:
             # Do not bundle aggregations when retrieving the original event because
             # we want the content before relations are applied to it.
-            return_value["original_event"] = (
-                await self._event_serializer.serialize_event(
-                    event,
-                    now,
-                    bundle_aggregations=None,
-                    config=serialize_options,
-                )
+            return_value[
+                "original_event"
+            ] = await self._event_serializer.serialize_event(
+                event,
+                now,
+                bundle_aggregations=None,
+                config=serialize_options,
             )
 
         if next_token:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 2302d283a7..386375d64b 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -20,6 +20,7 @@
 #
 
 """Contains functions for performing actions on rooms."""
+
 import itertools
 import logging
 import math
@@ -900,11 +901,9 @@ class RoomCreationHandler:
         )
 
         # Check whether this visibility value is blocked by a third party module
-        allowed_by_third_party_rules = (
-            await (
-                self._third_party_event_rules.check_visibility_can_be_modified(
-                    room_id, visibility
-                )
+        allowed_by_third_party_rules = await (
+            self._third_party_event_rules.check_visibility_can_be_modified(
+                room_id, visibility
             )
         )
         if not allowed_by_third_party_rules:
@@ -1188,6 +1187,8 @@ class RoomCreationHandler:
             )
             events_to_send.append((power_event, power_context))
         else:
+            # Please update the docs for `default_power_level_content_override` when
+            # updating the `events` dict below
             power_level_content: JsonDict = {
                 "users": {creator_id: 100},
                 "users_default": 0,
@@ -1748,11 +1749,11 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
                 from_key=from_key,
                 to_key=to_key,
                 limit=limit or 10,
-                order="ASC",
+                direction=Direction.FORWARDS,
             )
 
             events = list(room_events)
-            events.extend(e for evs, _ in room_to_events.values() for e in evs)
+            events.extend(e for evs, _, _ in room_to_events.values() for e in evs)
 
             # We know stream_ordering must be not None here, as its been
             # persisted, but mypy doesn't know that
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 51b9772329..75c60e3c34 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1302,11 +1302,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         # If this is going to be a local join, additional information must
         # be included in the event content in order to efficiently validate
         # the event.
-        content[EventContentFields.AUTHORISING_USER] = (
-            await self.event_auth_handler.get_user_which_could_invite(
-                room_id,
-                state_before_join,
-            )
+        content[
+            EventContentFields.AUTHORISING_USER
+        ] = await self.event_auth_handler.get_user_which_could_invite(
+            room_id,
+            state_before_join,
         )
 
         return False, []
@@ -1415,9 +1415,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 
         if requester is not None:
             sender = UserID.from_string(event.sender)
-            assert (
-                sender == requester.user
-            ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
+            assert sender == requester.user, (
+                "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
+            )
             assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
         else:
             requester = types.create_requester(target_user)
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 720459f1e7..64f5bea014 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -183,8 +183,13 @@ class RoomSummaryHandler:
     ) -> JsonDict:
         """See docstring for SpaceSummaryHandler.get_room_hierarchy."""
 
-        # First of all, check that the room is accessible.
-        if not await self._is_local_room_accessible(requested_room_id, requester):
+        # If the room is available locally, quickly check that the user can access it.
+        local_room = await self._store.is_host_joined(
+            requested_room_id, self._server_name
+        )
+        if local_room and not await self._is_local_room_accessible(
+            requested_room_id, requester
+        ):
             raise UnstableSpecAuthError(
                 403,
                 "User %s not in room %s, and room previews are disabled"
@@ -192,6 +197,22 @@ class RoomSummaryHandler:
                 errcode=Codes.NOT_JOINED,
             )
 
+        if not local_room:
+            room_hierarchy = await self._summarize_remote_room_hierarchy(
+                _RoomQueueEntry(requested_room_id, ()),
+                False,
+            )
+            root_room_entry = room_hierarchy[0]
+            if not root_room_entry or not await self._is_remote_room_accessible(
+                requester, requested_room_id, root_room_entry.room
+            ):
+                raise UnstableSpecAuthError(
+                    403,
+                    "User %s not in room %s, and room previews are disabled"
+                    % (requester, requested_room_id),
+                    errcode=Codes.NOT_JOINED,
+                )
+
         # If this is continuing a previous session, pull the persisted data.
         if from_token:
             try:
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index a7d52fa648..1a71135d5f 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -423,9 +423,9 @@ class SearchHandler:
             }
 
         if search_result.room_groups and "room_id" in group_keys:
-            rooms_cat_res.setdefault("groups", {})[
-                "room_id"
-            ] = search_result.room_groups
+            rooms_cat_res.setdefault("groups", {})["room_id"] = (
+                search_result.room_groups
+            )
 
         if sender_group and "sender" in group_keys:
             rooms_cat_res.setdefault("groups", {})["sender"] = sender_group
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
deleted file mode 100644
index a1ddac903e..0000000000
--- a/synapse/handlers/sliding_sync.py
+++ /dev/null
@@ -1,1369 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2024 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-# Originally licensed under the Apache License, Version 2.0:
-# <http://www.apache.org/licenses/LICENSE-2.0>.
-#
-# [This file includes modifications made by New Vector Limited]
-#
-#
-import logging
-from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Set, Tuple
-
-import attr
-from immutabledict import immutabledict
-
-from synapse.api.constants import (
-    AccountDataTypes,
-    Direction,
-    EventContentFields,
-    EventTypes,
-    Membership,
-)
-from synapse.events import EventBase
-from synapse.events.utils import strip_event
-from synapse.handlers.relations import BundledAggregations
-from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
-from synapse.types import (
-    JsonDict,
-    PersistedEventPosition,
-    Requester,
-    RoomStreamToken,
-    StateMap,
-    StreamKeyType,
-    StreamToken,
-    UserID,
-)
-from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
-from synapse.types.state import StateFilter
-from synapse.visibility import filter_events_for_client
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-def filter_membership_for_sync(
-    *, membership: str, user_id: str, sender: Optional[str]
-) -> bool:
-    """
-    Returns True if the membership event should be included in the sync response,
-    otherwise False.
-
-    Attributes:
-        membership: The membership state of the user in the room.
-        user_id: The user ID that the membership applies to
-        sender: The person who sent the membership event
-    """
-
-    # Everything except `Membership.LEAVE` because we want everything that's *still*
-    # relevant to the user. There are few more things to include in the sync response
-    # (newly_left) but those are handled separately.
-    #
-    # This logic includes kicks (leave events where the sender is not the same user) and
-    # can be read as "anything that isn't a leave or a leave with a different sender".
-    #
-    # When `sender=None` and `membership=Membership.LEAVE`, it means that a state reset
-    # happened that removed the user from the room, or the user was the last person
-    # locally to leave the room which caused the server to leave the room. In both
-    # cases, we can just remove the rooms since they are no longer relevant to the user.
-    # They could still be added back later if they are `newly_left`.
-    return membership != Membership.LEAVE or sender not in (user_id, None)
-
-
-# We can't freeze this class because we want to update it in place with the
-# de-duplicated data.
-@attr.s(slots=True, auto_attribs=True)
-class RoomSyncConfig:
-    """
-    Holds the config for what data we should fetch for a room in the sync response.
-
-    Attributes:
-        timeline_limit: The maximum number of events to return in the timeline.
-
-        required_state_map: Map from state event type to state_keys requested for the
-            room. The values are close to `StateKey` but actually use a syntax where you
-            can provide `*` wildcard and `$LAZY` for lazy-loading room members.
-    """
-
-    timeline_limit: int
-    required_state_map: Dict[str, Set[str]]
-
-    @classmethod
-    def from_room_config(
-        cls,
-        room_params: SlidingSyncConfig.CommonRoomParameters,
-    ) -> "RoomSyncConfig":
-        """
-        Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config.
-
-        Args:
-            room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription`
-        """
-        required_state_map: Dict[str, Set[str]] = {}
-        for (
-            state_type,
-            state_key,
-        ) in room_params.required_state:
-            # If we already have a wildcard for this specific `state_key`, we don't need
-            # to add it since the wildcard already covers it.
-            if state_key in required_state_map.get(StateValues.WILDCARD, set()):
-                continue
-
-            # If we already have a wildcard `state_key` for this `state_type`, we don't need
-            # to add anything else
-            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
-                continue
-
-            # If we're getting wildcards for the `state_type` and `state_key`, that's
-            # all that matters so get rid of any other entries
-            if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD:
-                required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}}
-                # We can break, since we don't need to add anything else
-                break
-
-            # If we're getting a wildcard for the `state_type`, get rid of any other
-            # entries with the same `state_key`, since the wildcard will cover it already.
-            elif state_type == StateValues.WILDCARD:
-                # Get rid of any entries that match the `state_key`
-                #
-                # Make a copy so we don't run into an error: `dictionary changed size
-                # during iteration`, when we remove items
-                for (
-                    existing_state_type,
-                    existing_state_key_set,
-                ) in list(required_state_map.items()):
-                    # Make a copy so we don't run into an error: `Set changed size during
-                    # iteration`, when we filter out and remove items
-                    for existing_state_key in existing_state_key_set.copy():
-                        if existing_state_key == state_key:
-                            existing_state_key_set.remove(state_key)
-
-                    # If we've the left the `set()` empty, remove it from the map
-                    if existing_state_key_set == set():
-                        required_state_map.pop(existing_state_type, None)
-
-            # If we're getting a wildcard `state_key`, get rid of any other state_keys
-            # for this `state_type` since the wildcard will cover it already.
-            if state_key == StateValues.WILDCARD:
-                required_state_map[state_type] = {state_key}
-            # Otherwise, just add it to the set
-            else:
-                if required_state_map.get(state_type) is None:
-                    required_state_map[state_type] = {state_key}
-                else:
-                    required_state_map[state_type].add(state_key)
-
-        return cls(
-            timeline_limit=room_params.timeline_limit,
-            required_state_map=required_state_map,
-        )
-
-    def deep_copy(self) -> "RoomSyncConfig":
-        required_state_map: Dict[str, Set[str]] = {
-            state_type: state_key_set.copy()
-            for state_type, state_key_set in self.required_state_map.items()
-        }
-
-        return RoomSyncConfig(
-            timeline_limit=self.timeline_limit,
-            required_state_map=required_state_map,
-        )
-
-    def combine_room_sync_config(
-        self, other_room_sync_config: "RoomSyncConfig"
-    ) -> None:
-        """
-        Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the
-        superset union of the two.
-        """
-        # Take the highest timeline limit
-        if self.timeline_limit < other_room_sync_config.timeline_limit:
-            self.timeline_limit = other_room_sync_config.timeline_limit
-
-        # Union the required state
-        for (
-            state_type,
-            state_key_set,
-        ) in other_room_sync_config.required_state_map.items():
-            # If we already have a wildcard for everything, we don't need to add
-            # anything else
-            if StateValues.WILDCARD in self.required_state_map.get(
-                StateValues.WILDCARD, set()
-            ):
-                break
-
-            # If we already have a wildcard `state_key` for this `state_type`, we don't need
-            # to add anything else
-            if StateValues.WILDCARD in self.required_state_map.get(state_type, set()):
-                continue
-
-            # If we're getting wildcards for the `state_type` and `state_key`, that's
-            # all that matters so get rid of any other entries
-            if (
-                state_type == StateValues.WILDCARD
-                and StateValues.WILDCARD in state_key_set
-            ):
-                self.required_state_map = {state_type: {StateValues.WILDCARD}}
-                # We can break, since we don't need to add anything else
-                break
-
-            for state_key in state_key_set:
-                # If we already have a wildcard for this specific `state_key`, we don't need
-                # to add it since the wildcard already covers it.
-                if state_key in self.required_state_map.get(
-                    StateValues.WILDCARD, set()
-                ):
-                    continue
-
-                # If we're getting a wildcard for the `state_type`, get rid of any other
-                # entries with the same `state_key`, since the wildcard will cover it already.
-                if state_type == StateValues.WILDCARD:
-                    # Get rid of any entries that match the `state_key`
-                    #
-                    # Make a copy so we don't run into an error: `dictionary changed size
-                    # during iteration`, when we remove items
-                    for existing_state_type, existing_state_key_set in list(
-                        self.required_state_map.items()
-                    ):
-                        # Make a copy so we don't run into an error: `Set changed size during
-                        # iteration`, when we filter out and remove items
-                        for existing_state_key in existing_state_key_set.copy():
-                            if existing_state_key == state_key:
-                                existing_state_key_set.remove(state_key)
-
-                        # If we've the left the `set()` empty, remove it from the map
-                        if existing_state_key_set == set():
-                            self.required_state_map.pop(existing_state_type, None)
-
-                # If we're getting a wildcard `state_key`, get rid of any other state_keys
-                # for this `state_type` since the wildcard will cover it already.
-                if state_key == StateValues.WILDCARD:
-                    self.required_state_map[state_type] = {state_key}
-                    break
-                # Otherwise, just add it to the set
-                else:
-                    if self.required_state_map.get(state_type) is None:
-                        self.required_state_map[state_type] = {state_key}
-                    else:
-                        self.required_state_map[state_type].add(state_key)
-
-
-class StateValues:
-    """
-    Understood values of the (type, state_key) tuple in `required_state`.
-    """
-
-    # Include all state events of the given type
-    WILDCARD: Final = "*"
-    # Lazy-load room membership events (include room membership events for any event
-    # `sender` in the timeline). We only give special meaning to this value when it's a
-    # `state_key`.
-    LAZY: Final = "$LAZY"
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class _RoomMembershipForUser:
-    """
-    Attributes:
-        event_id: The event ID of the membership event
-        event_pos: The stream position of the membership event
-        membership: The membership state of the user in the room
-        sender: The person who sent the membership event
-        newly_joined: Whether the user newly joined the room during the given token
-            range
-    """
-
-    event_id: Optional[str]
-    event_pos: PersistedEventPosition
-    membership: str
-    sender: Optional[str]
-    newly_joined: bool
-
-    def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser":
-        return attr.evolve(self, **kwds)
-
-
-class SlidingSyncHandler:
-    def __init__(self, hs: "HomeServer"):
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.storage_controllers = hs.get_storage_controllers()
-        self.auth_blocking = hs.get_auth_blocking()
-        self.notifier = hs.get_notifier()
-        self.event_sources = hs.get_event_sources()
-        self.relations_handler = hs.get_relations_handler()
-        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
-
-    async def wait_for_sync_for_user(
-        self,
-        requester: Requester,
-        sync_config: SlidingSyncConfig,
-        from_token: Optional[StreamToken] = None,
-        timeout_ms: int = 0,
-    ) -> SlidingSyncResult:
-        """
-        Get the sync for a client if we have new data for it now. Otherwise
-        wait for new data to arrive on the server. If the timeout expires, then
-        return an empty sync result.
-
-        Args:
-            requester: The user making the request
-            sync_config: Sync configuration
-            from_token: The point in the stream to sync from. Token of the end of the
-                previous batch. May be `None` if this is the initial sync request.
-            timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
-                we will immediately but there might not be any new data so we just return an
-                empty response.
-        """
-        # If the user is not part of the mau group, then check that limits have
-        # not been exceeded (if not part of the group by this point, almost certain
-        # auth_blocking will occur)
-        await self.auth_blocking.check_auth_blocking(requester=requester)
-
-        # TODO: If the To-Device extension is enabled and we have a `from_token`, delete
-        # any to-device messages before that token (since we now know that the device
-        # has received them). (see sync v2 for how to do this)
-
-        # If we're working with a user-provided token, we need to make sure to wait for
-        # this worker to catch up with the token so we don't skip past any incoming
-        # events or future events if the user is nefariously, manually modifying the
-        # token.
-        if from_token is not None:
-            # We need to make sure this worker has caught up with the token. If
-            # this returns false, it means we timed out waiting, and we should
-            # just return an empty response.
-            before_wait_ts = self.clock.time_msec()
-            if not await self.notifier.wait_for_stream_token(from_token):
-                logger.warning(
-                    "Timed out waiting for worker to catch up. Returning empty response"
-                )
-                return SlidingSyncResult.empty(from_token)
-
-            # If we've spent significant time waiting to catch up, take it off
-            # the timeout.
-            after_wait_ts = self.clock.time_msec()
-            if after_wait_ts - before_wait_ts > 1_000:
-                timeout_ms -= after_wait_ts - before_wait_ts
-                timeout_ms = max(timeout_ms, 0)
-
-        # We're going to respond immediately if the timeout is 0 or if this is an
-        # initial sync (without a `from_token`) so we can avoid calling
-        # `notifier.wait_for_events()`.
-        if timeout_ms == 0 or from_token is None:
-            now_token = self.event_sources.get_current_token()
-            result = await self.current_sync_for_user(
-                sync_config,
-                from_token=from_token,
-                to_token=now_token,
-            )
-        else:
-            # Otherwise, we wait for something to happen and report it to the user.
-            async def current_sync_callback(
-                before_token: StreamToken, after_token: StreamToken
-            ) -> SlidingSyncResult:
-                return await self.current_sync_for_user(
-                    sync_config,
-                    from_token=from_token,
-                    to_token=after_token,
-                )
-
-            result = await self.notifier.wait_for_events(
-                sync_config.user.to_string(),
-                timeout_ms,
-                current_sync_callback,
-                from_token=from_token,
-            )
-
-        return result
-
-    async def current_sync_for_user(
-        self,
-        sync_config: SlidingSyncConfig,
-        to_token: StreamToken,
-        from_token: Optional[StreamToken] = None,
-    ) -> SlidingSyncResult:
-        """
-        Generates the response body of a Sliding Sync result, represented as a
-        `SlidingSyncResult`.
-
-        We fetch data according to the token range (> `from_token` and <= `to_token`).
-
-        Args:
-            sync_config: Sync configuration
-            to_token: The point in the stream to sync up to.
-            from_token: The point in the stream to sync from. Token of the end of the
-                previous batch. May be `None` if this is the initial sync request.
-        """
-        user_id = sync_config.user.to_string()
-        app_service = self.store.get_app_service_by_user_id(user_id)
-        if app_service:
-            # We no longer support AS users using /sync directly.
-            # See https://github.com/matrix-org/matrix-doc/issues/1144
-            raise NotImplementedError()
-
-        # Assemble sliding window lists
-        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
-        # Keep track of the rooms that we're going to display and need to fetch more
-        # info about
-        relevant_room_map: Dict[str, RoomSyncConfig] = {}
-        if sync_config.lists:
-            # Get all of the room IDs that the user should be able to see in the sync
-            # response
-            sync_room_map = await self.get_sync_room_ids_for_user(
-                sync_config.user,
-                from_token=from_token,
-                to_token=to_token,
-            )
-
-            for list_key, list_config in sync_config.lists.items():
-                # Apply filters
-                filtered_sync_room_map = sync_room_map
-                if list_config.filters is not None:
-                    filtered_sync_room_map = await self.filter_rooms(
-                        sync_config.user, sync_room_map, list_config.filters, to_token
-                    )
-
-                # Sort the list
-                sorted_room_info = await self.sort_rooms(
-                    filtered_sync_room_map, to_token
-                )
-
-                # Find which rooms are partially stated and may need to be filtered out
-                # depending on the `required_state` requested (see below).
-                partial_state_room_map = await self.store.is_partial_state_room_batched(
-                    filtered_sync_room_map.keys()
-                )
-
-                # Since creating the `RoomSyncConfig` takes some work, let's just do it
-                # once and make a copy whenever we need it.
-                room_sync_config = RoomSyncConfig.from_room_config(list_config)
-                membership_state_keys = room_sync_config.required_state_map.get(
-                    EventTypes.Member
-                )
-                lazy_loading = (
-                    membership_state_keys is not None
-                    and len(membership_state_keys) == 1
-                    and StateValues.LAZY in membership_state_keys
-                )
-
-                ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
-                if list_config.ranges:
-                    for range in list_config.ranges:
-                        room_ids_in_list: List[str] = []
-
-                        # We're going to loop through the sorted list of rooms starting
-                        # at the range start index and keep adding rooms until we fill
-                        # up the range or run out of rooms.
-                        #
-                        # Both sides of range are inclusive so we `+ 1`
-                        max_num_rooms = range[1] - range[0] + 1
-                        for room_id, _ in sorted_room_info[range[0] :]:
-                            if len(room_ids_in_list) >= max_num_rooms:
-                                break
-
-                            # Exclude partially-stated rooms unless the `required_state`
-                            # only has `["m.room.member", "$LAZY"]` for membership
-                            # (lazy-loading room members).
-                            if partial_state_room_map.get(room_id) and not lazy_loading:
-                                continue
-
-                            # Take the superset of the `RoomSyncConfig` for each room.
-                            #
-                            # Update our `relevant_room_map` with the room we're going
-                            # to display and need to fetch more info about.
-                            existing_room_sync_config = relevant_room_map.get(room_id)
-                            if existing_room_sync_config is not None:
-                                existing_room_sync_config.combine_room_sync_config(
-                                    room_sync_config
-                                )
-                            else:
-                                # Make a copy so if we modify it later, it doesn't
-                                # affect all references.
-                                relevant_room_map[room_id] = (
-                                    room_sync_config.deep_copy()
-                                )
-
-                            room_ids_in_list.append(room_id)
-
-                        ops.append(
-                            SlidingSyncResult.SlidingWindowList.Operation(
-                                op=OperationType.SYNC,
-                                range=range,
-                                room_ids=room_ids_in_list,
-                            )
-                        )
-
-                lists[list_key] = SlidingSyncResult.SlidingWindowList(
-                    count=len(sorted_room_info),
-                    ops=ops,
-                )
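# Illustrative sketch (not Synapse's implementation): how an inclusive sliding-window
# range like [0, 9] is turned into a slice of the sorted room list, mirroring the loop
# above. The skip predicate stands in for the partial-state check.
def rooms_for_range(sorted_room_ids, start, end, should_skip=lambda room_id: False):
    # Both sides of the range are inclusive, hence the `+ 1`.
    max_num_rooms = end - start + 1
    room_ids_in_list = []
    for room_id in sorted_room_ids[start:]:
        if len(room_ids_in_list) >= max_num_rooms:
            break
        if should_skip(room_id):
            # e.g. partially-stated rooms are excluded unless lazy-loading members
            continue
        room_ids_in_list.append(room_id)
    return room_ids_in_list

# Range [0, 2] selects at most three rooms, skipping past any excluded ones.
assert rooms_for_range(["!a", "!b", "!c", "!d"], 0, 2) == ["!a", "!b", "!c"]
assert rooms_for_range(["!a", "!b", "!c", "!d"], 0, 2, lambda r: r == "!b") == ["!a", "!c", "!d"]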
-
-        # TODO: if (sync_config.room_subscriptions):
-
-        # Fetch room data
-        rooms: Dict[str, SlidingSyncResult.RoomResult] = {}
-        for room_id, room_sync_config in relevant_room_map.items():
-            room_sync_result = await self.get_room_sync_data(
-                user=sync_config.user,
-                room_id=room_id,
-                room_sync_config=room_sync_config,
-                rooms_membership_for_user_at_to_token=sync_room_map[room_id],
-                from_token=from_token,
-                to_token=to_token,
-            )
-
-            rooms[room_id] = room_sync_result
-
-        return SlidingSyncResult(
-            next_pos=to_token,
-            lists=lists,
-            rooms=rooms,
-            extensions={},
-        )
-
-    async def get_sync_room_ids_for_user(
-        self,
-        user: UserID,
-        to_token: StreamToken,
-        from_token: Optional[StreamToken] = None,
-    ) -> Dict[str, _RoomMembershipForUser]:
-        """
-        Fetch room IDs that should be listed for this user in the sync response (the
-        full room list that will be filtered, sorted, and sliced).
-
-        We're looking for rooms where the user has the following state in the token
-        range (> `from_token` and <= `to_token`):
-
-        - `invite`, `join`, `knock`, `ban` membership events
-        - Kicks (`leave` membership events where `sender` is different from the
-          `user_id`/`state_key`)
-        - `newly_left` (rooms that were left during the given token range)
-        - In order for bans/kicks to not show up in sync, you need to `/forget` those
-          rooms. This doesn't modify the event itself though and only adds the
-          `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
-          to tell when a room was forgotten at the moment so we can't factor it into the
-          from/to range.
-
-        Args:
-            user: User to fetch rooms for
-            to_token: The token to fetch rooms up to.
-            from_token: The point in the stream to sync from.
-
-        Returns:
-            A dictionary of room IDs that should be listed in the sync response along
-            with membership information in that room at the time of `to_token`.
-        """
-        user_id = user.to_string()
-
-        # First, grab a current snapshot of the rooms for the user
-        # (also handles forgotten rooms)
-        room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
-            user_id=user_id,
-            # We want to fetch any kind of membership (joined and left rooms) in order
-            # to get the `event_pos` of the latest room membership event for the
-            # user.
-            #
-            # We will filter out the rooms that don't belong below (see
-            # `filter_membership_for_sync`)
-            membership_list=Membership.LIST,
-            excluded_rooms=self.rooms_to_exclude_globally,
-        )
-
-        # If the user has never joined any rooms before, we can just return an empty list
-        if not room_for_user_list:
-            return {}
-
-        # Our working list of rooms that can show up in the sync response
-        sync_room_id_set = {
-            # Note: The `room_for_user` we're assigning here will need to be fixed up
-            # (below) because they are potentially from the current snapshot time
-            # instead of from the time of the `to_token`.
-            room_for_user.room_id: _RoomMembershipForUser(
-                event_id=room_for_user.event_id,
-                event_pos=room_for_user.event_pos,
-                membership=room_for_user.membership,
-                sender=room_for_user.sender,
-                newly_joined=False,
-            )
-            for room_for_user in room_for_user_list
-        }
-
-        # Get the `RoomStreamToken` that represents the spot we queried up to when we got
-        # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
-        #
-        # First, we need to get the max stream_ordering of each event persister instance
-        # that we queried events from.
-        instance_to_max_stream_ordering_map: Dict[str, int] = {}
-        for room_for_user in room_for_user_list:
-            instance_name = room_for_user.event_pos.instance_name
-            stream_ordering = room_for_user.event_pos.stream
-
-            current_instance_max_stream_ordering = (
-                instance_to_max_stream_ordering_map.get(instance_name)
-            )
-            if (
-                current_instance_max_stream_ordering is None
-                or stream_ordering > current_instance_max_stream_ordering
-            ):
-                instance_to_max_stream_ordering_map[instance_name] = stream_ordering
-
-        # Then assemble the `RoomStreamToken`
-        membership_snapshot_token = RoomStreamToken(
-            # Minimum position in the `instance_map`
-            stream=min(instance_to_max_stream_ordering_map.values()),
-            instance_map=immutabledict(instance_to_max_stream_ordering_map),
-        )
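# Illustrative sketch (not Synapse's implementation): assembling the membership
# snapshot token from per-instance stream positions, as the code above does. Plain
# tuples stand in for `PersistedEventPosition` and `RoomStreamToken`.
def snapshot_token(event_positions):
    # event_positions: iterable of (instance_name, stream_ordering) pairs
    instance_to_max = {}
    for instance_name, stream_ordering in event_positions:
        current = instance_to_max.get(instance_name)
        if current is None or stream_ordering > current:
            instance_to_max[instance_name] = stream_ordering
    # The token's `stream` field is the minimum across instances; the full
    # per-instance map travels with it.
    return min(instance_to_max.values()), instance_to_max

stream, instance_map = snapshot_token([("worker1", 10), ("worker1", 15), ("worker2", 7)])
assert stream == 7 and instance_map == {"worker1": 15, "worker2": 7}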
-
-        # Since we fetched the user's room list at some point in time after the from/to
-        # tokens, we need to revert/rewind some membership changes to match the point in
-        # time of the `to_token`. In particular, we need to make these fixups:
-        #
-        # - 1a) Remove rooms that the user joined after the `to_token`
-        # - 1b) Add back rooms that the user left after the `to_token`
-        # - 1c) Update room membership events to the point in time of the `to_token`
-        # - 2) Add back newly_left rooms (> `from_token` and <= `to_token`)
-        # - 3) Figure out which rooms are `newly_joined`
-
-        # 1) -----------------------------------------------------
-
-        # 1) Fetch membership changes that fall in the range from `to_token` up to
-        # `membership_snapshot_token`
-        #
-        # If our `to_token` is already the same or ahead of the latest room membership
-        # for the user, we don't need to do any "1)" fix-ups and can just straight-up
-        # use the room list from the snapshot as a base (nothing has changed)
-        current_state_delta_membership_changes_after_to_token = []
-        if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
-            current_state_delta_membership_changes_after_to_token = (
-                await self.store.get_current_state_delta_membership_changes_for_user(
-                    user_id,
-                    from_key=to_token.room_key,
-                    to_key=membership_snapshot_token,
-                    excluded_room_ids=self.rooms_to_exclude_globally,
-                )
-            )
-
-        # 1) Assemble a list of the first membership event after the `to_token` so we can
-        # step backward to the previous membership that would apply to the from/to
-        # range.
-        first_membership_change_by_room_id_after_to_token: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        for membership_change in current_state_delta_membership_changes_after_to_token:
-            # Only set if we haven't already set it
-            first_membership_change_by_room_id_after_to_token.setdefault(
-                membership_change.room_id, membership_change
-            )
-
-        # 1) Fixup
-        #
-        # Since we fetched a snapshot of the user's room list at some point in time after
-        # the from/to tokens, we need to revert/rewind some membership changes to match
-        # the point in time of the `to_token`.
-        for (
-            room_id,
-            first_membership_change_after_to_token,
-        ) in first_membership_change_by_room_id_after_to_token.items():
-            # 1a) Remove rooms that the user joined after the `to_token`
-            if first_membership_change_after_to_token.prev_event_id is None:
-                sync_room_id_set.pop(room_id, None)
-            # 1b) 1c) From the first membership event after the `to_token`, step backward to the
-            # previous membership that would apply to the from/to range.
-            else:
-                # We don't expect these fields to be `None` if we have a `prev_event_id`
-                # but we're being defensive since it's possible that the prev event was
-                # culled from the database.
-                if (
-                    first_membership_change_after_to_token.prev_event_pos is not None
-                    and first_membership_change_after_to_token.prev_membership
-                    is not None
-                ):
-                    sync_room_id_set[room_id] = _RoomMembershipForUser(
-                        event_id=first_membership_change_after_to_token.prev_event_id,
-                        event_pos=first_membership_change_after_to_token.prev_event_pos,
-                        membership=first_membership_change_after_to_token.prev_membership,
-                        sender=first_membership_change_after_to_token.prev_sender,
-                        newly_joined=False,
-                    )
-                else:
-                    # If we can't find the previous membership event, we shouldn't
-                    # include the room in the sync response since we can't determine the
-                    # exact membership state and shouldn't rely on the current snapshot.
-                    sync_room_id_set.pop(room_id, None)
-
-        # Filter the rooms now that we have updated their room membership events to the
-        # point in time of the `to_token` (from the "1)" fixups)
-        filtered_sync_room_id_set = {
-            room_id: room_membership_for_user
-            for room_id, room_membership_for_user in sync_room_id_set.items()
-            if filter_membership_for_sync(
-                membership=room_membership_for_user.membership,
-                user_id=user_id,
-                sender=room_membership_for_user.sender,
-            )
-        }
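# Illustrative sketch (not Synapse's `filter_membership_for_sync`): the membership
# rule described in the docstring above - invites, joins, knocks and bans are kept,
# and `leave` events only when they are kicks (the sender differs from the affected
# user). Newly-left rooms are added back separately in step 2).
def keep_membership_for_sync(membership, user_id, sender):
    if membership in ("invite", "join", "knock", "ban"):
        return True
    # A leave sent by someone else is a kick and should still be listed.
    return membership == "leave" and sender is not None and sender != user_id

assert keep_membership_for_sync("join", "@alice:test", "@alice:test")
assert keep_membership_for_sync("leave", "@alice:test", "@mod:test")        # kick
assert not keep_membership_for_sync("leave", "@alice:test", "@alice:test")  # own leave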
-
-        # 2) -----------------------------------------------------
-        # We fix up newly_left rooms after the first fixup because that fixup may have
-        # removed some left rooms that the following code can identify as newly_left
-
-        # 2) Fetch membership changes that fall in the range from `from_token` up to `to_token`
-        current_state_delta_membership_changes_in_from_to_range = []
-        if from_token:
-            current_state_delta_membership_changes_in_from_to_range = (
-                await self.store.get_current_state_delta_membership_changes_for_user(
-                    user_id,
-                    from_key=from_token.room_key,
-                    to_key=to_token.room_key,
-                    excluded_room_ids=self.rooms_to_exclude_globally,
-                )
-            )
-
-        # 2) Assemble a list of the last membership events in some given ranges. Someone
-        # could have left and joined multiple times during the given range but we only
-        # care about the end result so we grab the last one.
-        last_membership_change_by_room_id_in_from_to_range: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        # We also want to assemble a list of the first membership events during the token
-        # range so we can step backward to the previous membership that would apply to
-        # before the token range to see if we have `newly_joined` the room.
-        first_membership_change_by_room_id_in_from_to_range: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        # Keep track if the room has a non-join event in the token range so we can later
-        # tell if it was a `newly_joined` room. If the last membership event in the
-        # token range is a join and there is also some non-join in the range, we know
-        # they `newly_joined`.
-        has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {}
-        for (
-            membership_change
-        ) in current_state_delta_membership_changes_in_from_to_range:
-            room_id = membership_change.room_id
-
-            last_membership_change_by_room_id_in_from_to_range[room_id] = (
-                membership_change
-            )
-            # Only set if we haven't already set it
-            first_membership_change_by_room_id_in_from_to_range.setdefault(
-                room_id, membership_change
-            )
-
-            if membership_change.membership != Membership.JOIN:
-                has_non_join_event_by_room_id_in_from_to_range[room_id] = True
-
-        # 2) Fixup
-        #
-        # 3) We also want to assemble a list of possibly newly joined rooms. Someone
-        # could have left and joined multiple times during the given range but we only
-        # care about whether they are joined at the end of the token range so we are
-        # working with the last membership event in the token range.
-        possibly_newly_joined_room_ids = set()
-        for (
-            last_membership_change_in_from_to_range
-        ) in last_membership_change_by_room_id_in_from_to_range.values():
-            room_id = last_membership_change_in_from_to_range.room_id
-
-            # 3)
-            if last_membership_change_in_from_to_range.membership == Membership.JOIN:
-                possibly_newly_joined_room_ids.add(room_id)
-
-            # 2) Add back newly_left rooms (> `from_token` and <= `to_token`). We
-            # include newly_left rooms because the last event that the user should see
-            # is their own leave event
-            if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
-                filtered_sync_room_id_set[room_id] = _RoomMembershipForUser(
-                    event_id=last_membership_change_in_from_to_range.event_id,
-                    event_pos=last_membership_change_in_from_to_range.event_pos,
-                    membership=last_membership_change_in_from_to_range.membership,
-                    sender=last_membership_change_in_from_to_range.sender,
-                    newly_joined=False,
-                )
-
-        # 3) Figure out `newly_joined`
-        for room_id in possibly_newly_joined_room_ids:
-            has_non_join_in_from_to_range = (
-                has_non_join_event_by_room_id_in_from_to_range.get(room_id, False)
-            )
-            # If the last membership event in the token range is a join and there is
-            # also some non-join in the range, we know they `newly_joined`.
-            if has_non_join_in_from_to_range:
-                # We found a `newly_joined` room (we left and joined within the token range)
-                filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
-                    room_id
-                ].copy_and_replace(newly_joined=True)
-            else:
-                prev_event_id = first_membership_change_by_room_id_in_from_to_range[
-                    room_id
-                ].prev_event_id
-                prev_membership = first_membership_change_by_room_id_in_from_to_range[
-                    room_id
-                ].prev_membership
-
-                if prev_event_id is None:
-                    # We found a `newly_joined` room (we are joining the room for the
-                    # first time within the token range)
-                    filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
-                        room_id
-                    ].copy_and_replace(newly_joined=True)
-                # Last resort, we need to step back to the previous membership event
-                # just before the token range to see if we're joined then or not.
-                elif prev_membership != Membership.JOIN:
-                    # We found a `newly_joined` room (we left before the token range
-                    # and joined within the token range)
-                    filtered_sync_room_id_set[room_id] = filtered_sync_room_id_set[
-                        room_id
-                    ].copy_and_replace(newly_joined=True)
-
-        return filtered_sync_room_id_set
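# Illustrative sketch (not Synapse's implementation) of the `newly_joined` decision
# above. `prev_membership` is the membership just before the token range, or None if
# the first membership event in the range has no previous event.
def is_newly_joined(last_membership_in_range, has_non_join_in_range, prev_membership):
    if last_membership_in_range != "join":
        return False
    # They left (or were otherwise not joined) and joined again within the range.
    if has_non_join_in_range:
        return True
    # No previous membership at all: their first ever join is inside the range.
    if prev_membership is None:
        return True
    # Otherwise they are newly joined only if they weren't joined before the range.
    return prev_membership != "join"

assert is_newly_joined("join", False, None)
assert is_newly_joined("join", False, "leave")
assert not is_newly_joined("join", False, "join")
assert not is_newly_joined("leave", True, "join")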
-
-    async def filter_rooms(
-        self,
-        user: UserID,
-        sync_room_map: Dict[str, _RoomMembershipForUser],
-        filters: SlidingSyncConfig.SlidingSyncList.Filters,
-        to_token: StreamToken,
-    ) -> Dict[str, _RoomMembershipForUser]:
-        """
-        Filter rooms based on the sync request.
-
-        Args:
-            user: User to filter rooms for
-            sync_room_map: Dictionary of room IDs to sort along with membership
-                information in the room at the time of `to_token`.
-            filters: Filters to apply
-            to_token: We filter based on the state of the room at this token
-
-        Returns:
-            A filtered dictionary of room IDs along with membership information in the
-            room at the time of `to_token`.
-        """
-        user_id = user.to_string()
-
-        # TODO: Apply filters
-
-        filtered_room_id_set = set(sync_room_map.keys())
-
-        # Filter for Direct-Message (DM) rooms
-        if filters.is_dm is not None:
-            # We're using global account data (`m.direct`) instead of checking for
-            # `is_direct` on membership events because that property only appears for
-            # the invitee membership event (doesn't show up for the inviter). Account
-            # data is set by the client so it needs to be scrutinized.
-            #
-            # We're unable to take `to_token` into account for global account data since
-            # we only keep track of the latest account data for the user.
-            dm_map = await self.store.get_global_account_data_by_type_for_user(
-                user_id, AccountDataTypes.DIRECT
-            )
-
-            # Flatten out the map
-            dm_room_id_set = set()
-            if isinstance(dm_map, dict):
-                for room_ids in dm_map.values():
-                    # Account data should be a list of room IDs. Ignore anything else
-                    if isinstance(room_ids, list):
-                        for room_id in room_ids:
-                            if isinstance(room_id, str):
-                                dm_room_id_set.add(room_id)
-
-            if filters.is_dm:
-                # Only DM rooms please
-                filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set)
-            else:
-                # Only non-DM rooms please
-                filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set)
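# Illustrative sketch (not Synapse's implementation): flattening `m.direct` account
# data ({user_id: [room_id, ...]}) into a set of DM room IDs, ignoring anything that
# isn't a list of strings, as the filter above does.
def dm_room_ids(dm_map):
    dm_rooms = set()
    if isinstance(dm_map, dict):
        for room_ids in dm_map.values():
            if isinstance(room_ids, list):
                for room_id in room_ids:
                    if isinstance(room_id, str):
                        dm_rooms.add(room_id)
    return dm_rooms

assert dm_room_ids({"@bob:test": ["!dm1:test"], "junk": "not-a-list"}) == {"!dm1:test"}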
-
-        if filters.spaces:
-            raise NotImplementedError()
-
-        # Filter for encrypted rooms
-        if filters.is_encrypted is not None:
-            # Make a copy so we don't run into an error: `Set changed size during
-            # iteration`, when we filter out and remove items
-            for room_id in filtered_room_id_set.copy():
-                state_at_to_token = await self.storage_controllers.state.get_state_at(
-                    room_id,
-                    to_token,
-                    state_filter=StateFilter.from_types(
-                        [(EventTypes.RoomEncryption, "")]
-                    ),
-                    # Partially-stated rooms should have all state events except for the
-                    # membership events so we don't need to wait because we only care
-                    # about retrieving the `EventTypes.RoomEncryption` state event here.
-                    # Plus we don't want to block the whole sync waiting for this one
-                    # room.
-                    await_full_state=False,
-                )
-                is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, ""))
-
-                # If we're looking for encrypted rooms, filter out rooms that are not
-                # encrypted and vice versa
-                if (filters.is_encrypted and not is_encrypted) or (
-                    not filters.is_encrypted and is_encrypted
-                ):
-                    filtered_room_id_set.remove(room_id)
-
-        # Filter for rooms that the user has been invited to
-        if filters.is_invite is not None:
-            # Make a copy so we don't run into an error: `Set changed size during
-            # iteration`, when we filter out and remove items
-            for room_id in filtered_room_id_set.copy():
-                room_for_user = sync_room_map[room_id]
-                # If we're looking for invite rooms, filter out rooms that the user is
-                # not invited to and vice versa
-                if (
-                    filters.is_invite and room_for_user.membership != Membership.INVITE
-                ) or (
-                    not filters.is_invite
-                    and room_for_user.membership == Membership.INVITE
-                ):
-                    filtered_room_id_set.remove(room_id)
-
-        # Filter by room type (space vs room, etc). A room must match one of the types
-        # provided in the list. `None` is a valid type for rooms which do not have a
-        # room type.
-        if filters.room_types is not None or filters.not_room_types is not None:
-            # Make a copy so we don't run into an error: `Set changed size during
-            # iteration`, when we filter out and remove items
-            for room_id in filtered_room_id_set.copy():
-                create_event = await self.store.get_create_event_for_room(room_id)
-                room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
-                if (
-                    filters.room_types is not None
-                    and room_type not in filters.room_types
-                ):
-                    filtered_room_id_set.remove(room_id)
-
-                if (
-                    filters.not_room_types is not None
-                    and room_type in filters.not_room_types
-                ):
-                    filtered_room_id_set.remove(room_id)
-
-        if filters.room_name_like:
-            raise NotImplementedError()
-
-        if filters.tags:
-            raise NotImplementedError()
-
-        if filters.not_tags:
-            raise NotImplementedError()
-
-        # Assemble a new sync room map but only with the `filtered_room_id_set`
-        return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set}
-
-    async def sort_rooms(
-        self,
-        sync_room_map: Dict[str, _RoomMembershipForUser],
-        to_token: StreamToken,
-    ) -> List[Tuple[str, _RoomMembershipForUser]]:
-        """
-        Sort by `stream_ordering` of the last event that the user should see in the
-        room. `stream_ordering` is unique so we get a stable sort.
-
-        Args:
-            sync_room_map: Dictionary of room IDs to sort along with membership
-                information in the room at the time of `to_token`.
-            to_token: We sort based on the events in the room at this token (<= `to_token`)
-
-        Returns:
-            A sorted list of room IDs by `stream_ordering` along with membership information.
-        """
-
-        # Assemble a map of room ID to the `stream_ordering` of the last activity that the
-        # user should see in the room (<= `to_token`)
-        last_activity_in_room_map: Dict[str, int] = {}
-        for room_id, room_for_user in sync_room_map.items():
-            # If they are fully-joined to the room, let's find the latest activity
-            # at/before the `to_token`.
-            if room_for_user.membership == Membership.JOIN:
-                last_event_result = (
-                    await self.store.get_last_event_pos_in_room_before_stream_ordering(
-                        room_id, to_token.room_key
-                    )
-                )
-
-                # If the room has no events at/before the `to_token`, this is probably a
-                # mistake in the code that generates the `sync_room_map` since that should
-                # only give us rooms that the user had membership in during the token range.
-                assert last_event_result is not None
-
-                _, event_pos = last_event_result
-
-                last_activity_in_room_map[room_id] = event_pos.stream
-            else:
-                # Otherwise, if the user has left/been invited/knocked/been banned from
-                # a room, they shouldn't see anything past that point.
-                last_activity_in_room_map[room_id] = room_for_user.event_pos.stream
-
-        return sorted(
-            sync_room_map.items(),
-            # Sort by the last activity (stream_ordering) in the room
-            key=lambda room_info: last_activity_in_room_map[room_info[0]],
-            # We want descending order
-            reverse=True,
-        )
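# Illustrative sketch (not Synapse's implementation): sorting rooms by the last
# activity the user should see, most recent first, from the per-room stream
# orderings computed above. `stream_ordering` is unique, so the order is stable.
def sort_by_last_activity(last_activity_in_room_map):
    return sorted(
        last_activity_in_room_map,
        key=lambda room_id: last_activity_in_room_map[room_id],
        reverse=True,
    )

assert sort_by_last_activity({"!a": 5, "!b": 12, "!c": 9}) == ["!b", "!c", "!a"]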
-
-    async def get_room_sync_data(
-        self,
-        user: UserID,
-        room_id: str,
-        room_sync_config: RoomSyncConfig,
-        rooms_membership_for_user_at_to_token: _RoomMembershipForUser,
-        from_token: Optional[StreamToken],
-        to_token: StreamToken,
-    ) -> SlidingSyncResult.RoomResult:
-        """
-        Fetch room data for the sync response.
-
-        We fetch data according to the token range (> `from_token` and <= `to_token`).
-
-        Args:
-            user: User to fetch data for
-            room_id: The room ID to fetch data for
-            room_sync_config: Config for what data we should fetch for a room in the
-                sync response.
-            rooms_membership_for_user_at_to_token: Membership information for the user
-                in the room at the time of `to_token`.
-            from_token: The point in the stream to sync from.
-            to_token: The point in the stream to sync up to.
-        """
-
-        # Assemble the list of timeline events
-        #
-        # FIXME: It would be nice to make the `rooms` response more uniform regardless of
-        # membership. Currently, we have to make all of these optional because
-        # `invite`/`knock` rooms only have `stripped_state`. See
-        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
-        timeline_events: Optional[List[EventBase]] = None
-        bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None
-        limited: Optional[bool] = None
-        prev_batch_token: Optional[StreamToken] = None
-        num_live: Optional[int] = None
-        if (
-            room_sync_config.timeline_limit > 0
-            # No timeline for invite/knock rooms (just `stripped_state`)
-            and rooms_membership_for_user_at_to_token.membership
-            not in (Membership.INVITE, Membership.KNOCK)
-        ):
-            limited = False
-            # We want to start off using the `to_token` (vs `from_token`) because we look
-            # backwards from the `to_token` up to the `timeline_limit` and we might not
-            # reach the `from_token` before we hit the limit. We will update the room stream
-            # position once we've fetched the events to point to the earliest event fetched.
-            prev_batch_token = to_token
-
-            # We're going to paginate backwards from the `to_token`
-            from_bound = to_token.room_key
-            # People shouldn't see past their leave/ban event
-            if rooms_membership_for_user_at_to_token.membership in (
-                Membership.LEAVE,
-                Membership.BAN,
-            ):
-                from_bound = (
-                    rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token()
-                )
-
-            # Determine whether we should limit the timeline to the token range.
-            #
-            # We should return historical messages (before token range) in the
-            # following cases because we want clients to be able to show a basic
-            # screen of information:
-            #  - Initial sync (because no `from_token` to limit us anyway)
-            #  - When users `newly_joined`
-            #  - TODO: For an incremental sync where we haven't sent it down this
-            #    connection before
-            to_bound = (
-                from_token.room_key
-                if from_token is not None
-                and not rooms_membership_for_user_at_to_token.newly_joined
-                else None
-            )
-
-            timeline_events, new_room_key = await self.store.paginate_room_events(
-                room_id=room_id,
-                from_key=from_bound,
-                to_key=to_bound,
-                direction=Direction.BACKWARDS,
-                # We add one so we can determine if there are enough events to saturate
-                # the limit or not (see `limited`)
-                limit=room_sync_config.timeline_limit + 1,
-                event_filter=None,
-            )
-
-            # We want to return the events in ascending order (the last event is the
-            # most recent).
-            timeline_events.reverse()
-
-            # Determine our `limited` status based on the timeline. We do this before
-            # filtering the events so we can accurately determine if there is more to
-            # paginate even if we filter out some/all events.
-            if len(timeline_events) > room_sync_config.timeline_limit:
-                limited = True
-                # Get rid of that extra "+ 1" event because we only used it to determine
-                # if we hit the limit or not
-                timeline_events = timeline_events[-room_sync_config.timeline_limit :]
-                assert timeline_events[0].internal_metadata.stream_ordering
-                new_room_key = RoomStreamToken(
-                    stream=timeline_events[0].internal_metadata.stream_ordering - 1
-                )
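# Illustrative sketch (not Synapse's implementation): fetch `limit + 1` events
# (newest first), flip them to ascending order, and use the extra event only to
# decide whether the timeline is `limited`, as the code above does with
# `paginate_room_events`.
def build_timeline(events_newest_first, timeline_limit):
    fetched = events_newest_first[: timeline_limit + 1]
    fetched.reverse()  # ascending order: the last element is the most recent event
    limited = len(fetched) > timeline_limit
    if limited:
        # Drop the extra "+ 1" event; it only told us that the limit was saturated.
        fetched = fetched[-timeline_limit:]
    return fetched, limited

events, limited = build_timeline(["e5", "e4", "e3", "e2", "e1"], 3)
assert events == ["e3", "e4", "e5"] and limited is True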
-
-            # Make sure we don't expose any events that the client shouldn't see
-            timeline_events = await filter_events_for_client(
-                self.storage_controllers,
-                user.to_string(),
-                timeline_events,
-                is_peeking=rooms_membership_for_user_at_to_token.membership
-                != Membership.JOIN,
-                filter_send_to_client=True,
-            )
-            # TODO: Filter out `EventTypes.CallInvite` in public rooms,
-            # see https://github.com/element-hq/synapse/issues/17359
-
-            # TODO: Handle timeline gaps (`get_timeline_gaps()`)
-
-            # Determine how many "live" events we have (events within the given token range).
-            #
-            # This is mostly useful to determine whether a given @mention event should
-            # make a noise or not. Clients cannot rely solely on the absence of
-            # `initial: true` to determine live events because if a room not in the
-            # sliding window bumps into the window because of an @mention it will have
-            # `initial: true` yet contain a single live event (with potentially other
-            # old events in the timeline)
-            num_live = 0
-            if from_token is not None:
-                for timeline_event in reversed(timeline_events):
-                    # These fields should be present for all persisted events
-                    assert timeline_event.internal_metadata.stream_ordering is not None
-                    assert timeline_event.internal_metadata.instance_name is not None
-
-                    persisted_position = PersistedEventPosition(
-                        instance_name=timeline_event.internal_metadata.instance_name,
-                        stream=timeline_event.internal_metadata.stream_ordering,
-                    )
-                    if persisted_position.persisted_after(from_token.room_key):
-                        num_live += 1
-                    else:
-                        # Since we're iterating over the timeline events in
-                        # reverse-chronological order, we can break once we hit an event
-                        # that's not live. In the future, we could potentially optimize
-                        # this more with a binary search (bisect).
-                        break
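# Illustrative sketch (not Synapse's implementation): counting "live" events, i.e.
# events persisted after the `from_token`. Walking the ascending timeline in reverse
# lets us stop at the first non-live event. Plain integers stand in for stream
# positions here.
def count_live(timeline_positions_ascending, from_position):
    num_live = 0
    for position in reversed(timeline_positions_ascending):
        if position > from_position:
            num_live += 1
        else:
            # Everything earlier in the timeline is also not live, so stop here.
            break
    return num_live

assert count_live([3, 7, 11, 15], from_position=10) == 2  # 11 and 15 are live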
-
-            # If the timeline is `limited=True`, the client does not have all events
-            # necessary to calculate aggregations themselves.
-            if limited:
-                bundled_aggregations = (
-                    await self.relations_handler.get_bundled_aggregations(
-                        timeline_events, user.to_string()
-                    )
-                )
-
-            # Update the `prev_batch_token` to point to the position that allows us to
-            # keep paginating backwards from the oldest event we return in the timeline.
-            prev_batch_token = prev_batch_token.copy_and_replace(
-                StreamKeyType.ROOM, new_room_key
-            )
-
-        # Figure out any stripped state events for invite/knocks. This allows the
-        # potential joiner to identify the room.
-        stripped_state: List[JsonDict] = []
-        if rooms_membership_for_user_at_to_token.membership in (
-            Membership.INVITE,
-            Membership.KNOCK,
-        ):
-            # This should never happen. If someone is invited to/knocked on a room, then
-            # there should be an event for it.
-            assert rooms_membership_for_user_at_to_token.event_id is not None
-
-            invite_or_knock_event = await self.store.get_event(
-                rooms_membership_for_user_at_to_token.event_id
-            )
-
-            stripped_state = []
-            if invite_or_knock_event.membership == Membership.INVITE:
-                stripped_state.extend(
-                    invite_or_knock_event.unsigned.get("invite_room_state", [])
-                )
-            elif invite_or_knock_event.membership == Membership.KNOCK:
-                stripped_state.extend(
-                    invite_or_knock_event.unsigned.get("knock_room_state", [])
-                )
-
-            stripped_state.append(strip_event(invite_or_knock_event))
-
-        # TODO: Handle state resets. For example, if we see
-        # `rooms_membership_for_user_at_to_token.membership = Membership.LEAVE` but
-        # `required_state` doesn't include it, we should indicate to the client that a
-        # state reset happened. Perhaps we should indicate this by setting `initial:
-        # True` and empty `required_state`.
-
-        # TODO: Since we can't determine whether we've already sent a room down this
-        # Sliding Sync connection before (we plan to add this optimization in the
-        # future), we're always returning the requested room state instead of
-        # updates.
-        initial = True
-
-        # Fetch the required state for the room
-        #
-        # No `required_state` for invite/knock rooms (just `stripped_state`)
-        #
-        # FIXME: It would be nice to make the `rooms` response more uniform regardless
-        # of membership. Currently, we have to make this optional because
-        # `invite`/`knock` rooms only have `stripped_state`. See
-        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
-        room_state: Optional[StateMap[EventBase]] = None
-        if rooms_membership_for_user_at_to_token.membership not in (
-            Membership.INVITE,
-            Membership.KNOCK,
-        ):
-            # Calculate the `StateFilter` based on the `required_state` for the room
-            state_filter: Optional[StateFilter] = StateFilter.none()
-            # If we have a double wildcard ("*", "*") in the `required_state`, we need
-            # to fetch all state for the room
-            #
-            # Note: MSC3575 describes different behavior to how we're handling things
-            # here but since it's not wrong to return more state than requested
-            # (`required_state` is just the minimum requested), it doesn't matter if we
-            # include more than the client wanted. This complexity is also under scrutiny,
-            # see
-            # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050
-            #
-            # > One unique exception is when you request all state events via ["*", "*"]. When used,
-            # > all state events are returned by default, and additional entries FILTER OUT the returned set
-            # > of state events. These additional entries cannot use '*' themselves.
-            # > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member
-            # > event _except_ for @alice:example.com, and include every other state event.
-            # > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not
-            # > required as it would have been returned anyway.
-            # >
-            # > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575)
-            if StateValues.WILDCARD in room_sync_config.required_state_map.get(
-                StateValues.WILDCARD, set()
-            ):
-                state_filter = StateFilter.all()
-            # TODO: `StateFilter` currently doesn't support wildcard event types. We're
-            # currently working around this by returning all state to the client but it
-            # would be nice to fetch less from the database and return just what the
-            # client wanted.
-            elif (
-                room_sync_config.required_state_map.get(StateValues.WILDCARD)
-                is not None
-            ):
-                state_filter = StateFilter.all()
-            else:
-                required_state_types: List[Tuple[str, Optional[str]]] = []
-                for (
-                    state_type,
-                    state_key_set,
-                ) in room_sync_config.required_state_map.items():
-                    for state_key in state_key_set:
-                        if state_key == StateValues.WILDCARD:
-                            # `None` is a wildcard in the `StateFilter`
-                            required_state_types.append((state_type, None))
-                        # We need to fetch all relevant people when we're lazy-loading membership
-                        elif (
-                            state_type == EventTypes.Member
-                            and state_key == StateValues.LAZY
-                        ):
-                            # Everyone in the timeline is relevant
-                            timeline_membership: Set[str] = set()
-                            if timeline_events is not None:
-                                for timeline_event in timeline_events:
-                                    timeline_membership.add(timeline_event.sender)
-
-                            for user_id in timeline_membership:
-                                required_state_types.append(
-                                    (EventTypes.Member, user_id)
-                                )
-
-                            # FIXME: We probably also care about invite, ban, kick, targets, etc
-                            # but the spec only mentions "senders".
-                        else:
-                            required_state_types.append((state_type, state_key))
-
-                state_filter = StateFilter.from_types(required_state_types)
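# Illustrative sketch (not Synapse's implementation): expanding a `required_state`
# map into the (event_type, state_key) pairs used to build the `StateFilter`, with
# "*" as the state-key wildcard and "$LAZY" membership expanded to the senders seen
# in the timeline, as in the loop above.
def expand_required_state(required_state_map, timeline_senders):
    required = []
    for state_type, state_keys in required_state_map.items():
        for state_key in state_keys:
            if state_key == "*":
                # `None` is the wildcard state key in the StateFilter
                required.append((state_type, None))
            elif state_type == "m.room.member" and state_key == "$LAZY":
                required.extend(("m.room.member", sender) for sender in timeline_senders)
            else:
                required.append((state_type, state_key))
    return required

expanded = expand_required_state(
    {"m.room.member": {"$LAZY"}, "m.room.name": {""}}, {"@alice:test"}
)
assert ("m.room.member", "@alice:test") in expanded and ("m.room.name", "") in expanded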
-
-            # We can skip fetching state if we don't need any
-            if state_filter != StateFilter.none():
-                # We can return all of the state that was requested if we're doing an
-                # initial sync
-                if initial:
-                    # People shouldn't see past their leave/ban event
-                    if rooms_membership_for_user_at_to_token.membership in (
-                        Membership.LEAVE,
-                        Membership.BAN,
-                    ):
-                        room_state = await self.storage_controllers.state.get_state_at(
-                            room_id,
-                            stream_position=to_token.copy_and_replace(
-                                StreamKeyType.ROOM,
-                                rooms_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
-                            ),
-                            state_filter=state_filter,
-                            # Partially-stated rooms should have all state events except for
-                            # the membership events and since we've already excluded
-                            # partially-stated rooms unless `required_state` only has
-                            # `["m.room.member", "$LAZY"]` for membership, we should be able
-                            # to retrieve everything requested. Plus we don't want to block
-                            # the whole sync waiting for this one room.
-                            await_full_state=False,
-                        )
-                    # Otherwise, we can get the latest current state in the room
-                    else:
-                        room_state = await self.storage_controllers.state.get_current_state(
-                            room_id,
-                            state_filter,
-                            # Partially-stated rooms should have all state events except for
-                            # the membership events and since we've already excluded
-                            # partially-stated rooms unless `required_state` only has
-                            # `["m.room.member", "$LAZY"]` for membership, we should be able
-                            # to retrieve everything requested. Plus we don't want to block
-                            # the whole sync waiting for this one room.
-                            await_full_state=False,
-                        )
-                        # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token`
-                else:
-                    # TODO: Once we can figure out if we've sent a room down this connection before,
-                    # we can return updates instead of the full required state.
-                    raise NotImplementedError()
-
-        return SlidingSyncResult.RoomResult(
-            # TODO: Dummy value
-            name=None,
-            # TODO: Dummy value
-            avatar=None,
-            # TODO: Dummy value
-            heroes=None,
-            # TODO: Dummy value
-            is_dm=False,
-            initial=initial,
-            required_state=list(room_state.values()) if room_state else None,
-            timeline_events=timeline_events,
-            bundled_aggregations=bundled_aggregations,
-            stripped_state=stripped_state,
-            prev_batch=prev_batch_token,
-            limited=limited,
-            num_live=num_live,
-            # TODO: Dummy values
-            joined_count=0,
-            invited_count=0,
-            # TODO: These are just dummy values. We could potentially just remove these
-            # since notifications can only really be done correctly on the client anyway
-            # (encrypted rooms).
-            notification_count=0,
-            highlight_count=0,
-        )
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
new file mode 100644
index 0000000000..a1a6728fb9
--- /dev/null
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -0,0 +1,1499 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from itertools import chain
+from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple
+
+from prometheus_client import Histogram
+from typing_extensions import assert_never
+
+from synapse.api.constants import Direction, EventTypes, Membership
+from synapse.events import EventBase
+from synapse.events.utils import strip_event
+from synapse.handlers.relations import BundledAggregations
+from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler
+from synapse.handlers.sliding_sync.room_lists import (
+    RoomsForUserType,
+    SlidingSyncRoomLists,
+)
+from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore
+from synapse.logging.opentracing import (
+    SynapseTags,
+    log_kv,
+    set_tag,
+    start_active_span,
+    tag_args,
+    trace,
+)
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.databases.main.stream import PaginateFunction
+from synapse.storage.roommember import (
+    MemberSummary,
+)
+from synapse.types import (
+    JsonDict,
+    MutableStateMap,
+    PersistedEventPosition,
+    Requester,
+    SlidingSyncStreamToken,
+    StateMap,
+    StrCollection,
+    StreamKeyType,
+    StreamToken,
+)
+from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
+from synapse.types.handlers.sliding_sync import (
+    HaveSentRoomFlag,
+    MutablePerConnectionState,
+    PerConnectionState,
+    RoomSyncConfig,
+    SlidingSyncConfig,
+    SlidingSyncResult,
+    StateValues,
+)
+from synapse.types.state import StateFilter
+from synapse.util.async_helpers import concurrently_execute
+from synapse.visibility import filter_events_for_client
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+sync_processing_time = Histogram(
+    "synapse_sliding_sync_processing_time",
+    "Time taken to generate a sliding sync response, ignoring wait times.",
+    ["initial"],
+)
+
+
+class SlidingSyncHandler:
+    def __init__(self, hs: "HomeServer"):
+        self.clock = hs.get_clock()
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.auth_blocking = hs.get_auth_blocking()
+        self.notifier = hs.get_notifier()
+        self.event_sources = hs.get_event_sources()
+        self.relations_handler = hs.get_relations_handler()
+        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
+        self.is_mine_id = hs.is_mine_id
+
+        self.connection_store = SlidingSyncConnectionStore(self.store)
+        self.extensions = SlidingSyncExtensionHandler(hs)
+        self.room_lists = SlidingSyncRoomLists(hs)
+
+    async def wait_for_sync_for_user(
+        self,
+        requester: Requester,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken] = None,
+        timeout_ms: int = 0,
+    ) -> SlidingSyncResult:
+        """
+        Get the sync for a client if we have new data for it now. Otherwise
+        wait for new data to arrive on the server. If the timeout expires, then
+        return an empty sync result.
+
+        Args:
+            requester: The user making the request
+            sync_config: Sync configuration
+            from_token: The point in the stream to sync from. Token of the end of the
+                previous batch. May be `None` if this is the initial sync request.
+            timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
+                we will return immediately, but there might not be any new data, so we
+                just return an empty response.
+        """
+        # If the user is not part of the mau group, then check that limits have
+        # not been exceeded (if they are not part of the group by this point, it is
+        # almost certain that auth_blocking will occur)
+        await self.auth_blocking.check_auth_blocking(requester=requester)
+
+        # If we're working with a user-provided token, we need to make sure to wait for
+        # this worker to catch up with the token so we don't skip past any incoming
+        # events or future events if the user is nefariously, manually modifying the
+        # token.
+        if from_token is not None:
+            # We need to make sure this worker has caught up with the token. If
+            # this returns false, it means we timed out waiting, and we should
+            # just return an empty response.
+            before_wait_ts = self.clock.time_msec()
+            if not await self.notifier.wait_for_stream_token(from_token.stream_token):
+                logger.warning(
+                    "Timed out waiting for worker to catch up. Returning empty response"
+                )
+                return SlidingSyncResult.empty(from_token)
+
+            # If we've spent significant time waiting to catch up, take it off
+            # the timeout.
+            after_wait_ts = self.clock.time_msec()
+            if after_wait_ts - before_wait_ts > 1_000:
+                timeout_ms -= after_wait_ts - before_wait_ts
+                timeout_ms = max(timeout_ms, 0)
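# Illustrative sketch (not Synapse's implementation): deducting a significant
# catch-up wait from the client's long-poll timeout, mirroring the logic above.
# Timestamps and the timeout are plain milliseconds.
def remaining_timeout(timeout_ms, before_wait_ts, after_wait_ts):
    waited_ms = after_wait_ts - before_wait_ts
    if waited_ms > 1_000:
        timeout_ms = max(timeout_ms - waited_ms, 0)
    return timeout_ms

assert remaining_timeout(30_000, 0, 5_000) == 25_000
assert remaining_timeout(30_000, 0, 500) == 30_000  # short waits are ignored
assert remaining_timeout(1_000, 0, 5_000) == 0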
+
+        # We're going to respond immediately if the timeout is 0 or if this is an
+        # initial sync (without a `from_token`) so we can avoid calling
+        # `notifier.wait_for_events()`.
+        if timeout_ms == 0 or from_token is None:
+            now_token = self.event_sources.get_current_token()
+            result = await self.current_sync_for_user(
+                sync_config,
+                from_token=from_token,
+                to_token=now_token,
+            )
+        else:
+            # Otherwise, we wait for something to happen and report it to the user.
+            async def current_sync_callback(
+                before_token: StreamToken, after_token: StreamToken
+            ) -> SlidingSyncResult:
+                return await self.current_sync_for_user(
+                    sync_config,
+                    from_token=from_token,
+                    to_token=after_token,
+                )
+
+            result = await self.notifier.wait_for_events(
+                sync_config.user.to_string(),
+                timeout_ms,
+                current_sync_callback,
+                from_token=from_token.stream_token,
+            )
+
+        return result
+
+    @trace
+    async def current_sync_for_user(
+        self,
+        sync_config: SlidingSyncConfig,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken] = None,
+    ) -> SlidingSyncResult:
+        """
+        Generates the response body of a Sliding Sync result, represented as a
+        `SlidingSyncResult`.
+
+        We fetch data according to the token range (> `from_token` and <= `to_token`).
+
+        Args:
+            sync_config: Sync configuration
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from. Token of the end of the
+                previous batch. May be `None` if this is the initial sync request.
+        """
+        start_time_s = self.clock.time()
+
+        user_id = sync_config.user.to_string()
+        app_service = self.store.get_app_service_by_user_id(user_id)
+        if app_service:
+            # We no longer support AS users using /sync directly.
+            # See https://github.com/matrix-org/matrix-doc/issues/1144
+            raise NotImplementedError()
+
+        # Get the per-connection state (if any).
+        #
+        # Raises an exception if there is a `connection_position` that we don't
+        # recognize. If we don't do this and the client asks for the full range
+        # of rooms, we end up sending down all rooms and their state from
+        # scratch (which can be very slow). By expiring the connection we allow
+        # the client a chance to do an initial request with a smaller range of
+        # rooms to get them some results sooner, though it will end up taking the same
+        # amount of time (more, with round-trips and re-processing) in the end to
+        # get everything again.
+        previous_connection_state = (
+            await self.connection_store.get_and_clear_connection_positions(
+                sync_config, from_token
+            )
+        )
+
+        # Get all of the room IDs that the user should be able to see in the sync
+        # response
+        has_lists = sync_config.lists is not None and len(sync_config.lists) > 0
+        has_room_subscriptions = (
+            sync_config.room_subscriptions is not None
+            and len(sync_config.room_subscriptions) > 0
+        )
+
+        interested_rooms = await self.room_lists.compute_interested_rooms(
+            sync_config=sync_config,
+            previous_connection_state=previous_connection_state,
+            from_token=from_token.stream_token if from_token else None,
+            to_token=to_token,
+        )
+
+        lists = interested_rooms.lists
+        relevant_room_map = interested_rooms.relevant_room_map
+        all_rooms = interested_rooms.all_rooms
+        room_membership_for_user_map = interested_rooms.room_membership_for_user_map
+        relevant_rooms_to_send_map = interested_rooms.relevant_rooms_to_send_map
+
+        # Fetch room data
+        rooms: Dict[str, SlidingSyncResult.RoomResult] = {}
+
+        new_connection_state = previous_connection_state.get_mutable()
+
+        @trace
+        @tag_args
+        async def handle_room(room_id: str) -> None:
+            room_sync_result = await self.get_room_sync_data(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
+                room_id=room_id,
+                room_sync_config=relevant_rooms_to_send_map[room_id],
+                room_membership_for_user_at_to_token=room_membership_for_user_map[
+                    room_id
+                ],
+                from_token=from_token,
+                to_token=to_token,
+                newly_joined=room_id in interested_rooms.newly_joined_rooms,
+                is_dm=room_id in interested_rooms.dm_room_ids,
+            )
+
+            # Filter out empty room results during incremental sync
+            if room_sync_result or not from_token:
+                rooms[room_id] = room_sync_result
+
+        if relevant_rooms_to_send_map:
+            with start_active_span("sliding_sync.generate_room_entries"):
+                await concurrently_execute(handle_room, relevant_rooms_to_send_map, 20)
+
+        extensions = await self.extensions.get_extensions_response(
+            sync_config=sync_config,
+            actual_lists=lists,
+            previous_connection_state=previous_connection_state,
+            new_connection_state=new_connection_state,
+            # We're purposely using `relevant_room_map` instead of
+            # `relevant_rooms_to_send_map` here. This needs to be all room_ids we could
+            # send regardless of whether they have an event update or not. The
+            # extensions care about more than just normal events in the rooms (like
+            # account data, read receipts, typing indicators, to-device messages, etc).
+            actual_room_ids=set(relevant_room_map.keys()),
+            actual_room_response_map=rooms,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        if has_lists or has_room_subscriptions:
+            # We now calculate if any rooms outside the range have had updates,
+            # which we are not sending down.
+            #
+            # We *must* record rooms that have had updates, but it is also fine
+            # to record rooms as having updates even if there might not actually
+            # be anything new for the user (e.g. due to event filters, events
+            # having happened after the user left, etc).
+            if from_token:
+                # The set of rooms that the client (may) care about, but aren't
+                # in any list range (or subscribed to).
+                missing_rooms = all_rooms - relevant_room_map.keys()
+
+                # We now just go and try fetching any events in the above rooms
+                # to see if anything has happened since the `from_token`.
+                #
+                # TODO: Replace this with something faster. When we land the
+                # sliding sync tables that record the most recent event
+                # positions we can use that.
+                unsent_room_ids: StrCollection
+                if await self.store.have_finished_sliding_sync_background_jobs():
+                    unsent_room_ids = await (
+                        self.store.get_rooms_that_have_updates_since_sliding_sync_table(
+                            room_ids=missing_rooms,
+                            from_key=from_token.stream_token.room_key,
+                        )
+                    )
+                else:
+                    missing_event_map_by_room = (
+                        await self.store.get_room_events_stream_for_rooms(
+                            room_ids=missing_rooms,
+                            from_key=to_token.room_key,
+                            to_key=from_token.stream_token.room_key,
+                            limit=1,
+                        )
+                    )
+                    unsent_room_ids = list(missing_event_map_by_room)
+
+                new_connection_state.rooms.record_unsent_rooms(
+                    unsent_room_ids, from_token.stream_token.room_key
+                )
+
+            new_connection_state.rooms.record_sent_rooms(
+                relevant_rooms_to_send_map.keys()
+            )
+
+            connection_position = await self.connection_store.record_new_state(
+                sync_config=sync_config,
+                from_token=from_token,
+                new_connection_state=new_connection_state,
+            )
+        elif from_token:
+            connection_position = from_token.connection_position
+        else:
+            # Initial sync without a `from_token` starts at `0`
+            connection_position = 0
+
+        sliding_sync_result = SlidingSyncResult(
+            next_pos=SlidingSyncStreamToken(to_token, connection_position),
+            lists=lists,
+            rooms=rooms,
+            extensions=extensions,
+        )
+
+        # Make it easy to find traces for syncs that aren't empty
+        set_tag(SynapseTags.RESULT_PREFIX + "result", bool(sliding_sync_result))
+        set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id)
+
+        end_time_s = self.clock.time()
+        sync_processing_time.labels(from_token is not None).observe(
+            end_time_s - start_time_s
+        )
+
+        return sliding_sync_result
+
+    @trace
+    async def get_current_state_ids_at(
+        self,
+        room_id: str,
+        room_membership_for_user_at_to_token: RoomsForUserType,
+        state_filter: StateFilter,
+        to_token: StreamToken,
+    ) -> StateMap[str]:
+        """
+        Get current state IDs for the user in the room according to their membership. This
+        will be the current state at the time of their LEAVE/BAN, otherwise it will be the
+        current state <= `to_token`.
+
+        Args:
+            room_id: The room ID to fetch data for
+            room_membership_for_user_at_to_token: Membership information for the user
+                in the room at the time of `to_token`.
+            state_filter: Only fetch state events matching this filter.
+            to_token: The point in the stream to sync up to.
+        """
+        state_ids: StateMap[str]
+        # People shouldn't see past their leave/ban event
+        if room_membership_for_user_at_to_token.membership in (
+            Membership.LEAVE,
+            Membership.BAN,
+        ):
+            # TODO: `get_state_ids_at(...)` doesn't take into account the "current
+            # state". Maybe we need to use
+            # `get_forward_extremities_for_room_at_stream_ordering(...)` to "Fetch the
+            # current state at the time."
+            state_ids = await self.storage_controllers.state.get_state_ids_at(
+                room_id,
+                stream_position=to_token.copy_and_replace(
+                    StreamKeyType.ROOM,
+                    room_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
+                ),
+                state_filter=state_filter,
+                # Partially-stated rooms should have all state events except for
+                # remote membership events. Since we've already excluded
+                # partially-stated rooms unless `required_state` only has
+                # `["m.room.member", "$LAZY"]` for membership, we should be able to
+                # retrieve everything requested. When we're lazy-loading, if there
+                # are some remote senders in the timeline, we should also have their
+                # membership event because we had to auth that timeline event. Plus
+                # we don't want to block the whole sync waiting for this one room.
+                await_full_state=False,
+            )
+        # Otherwise, we can get the latest current state in the room
+        else:
+            state_ids = await self.storage_controllers.state.get_current_state_ids(
+                room_id,
+                state_filter,
+                # Partially-stated rooms should have all state events except for
+                # remote membership events. Since we've already excluded
+                # partially-stated rooms unless `required_state` only has
+                # `["m.room.member", "$LAZY"]` for membership, we should be able to
+                # retrieve everything requested. When we're lazy-loading, if there
+                # are some remote senders in the timeline, we should also have their
+                # membership event because we had to auth that timeline event. Plus
+                # we don't want to block the whole sync waiting for this one room.
+                await_full_state=False,
+            )
+            # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token`
+
+        return state_ids
+
+    @trace
+    async def get_current_state_at(
+        self,
+        room_id: str,
+        room_membership_for_user_at_to_token: RoomsForUserType,
+        state_filter: StateFilter,
+        to_token: StreamToken,
+    ) -> StateMap[EventBase]:
+        """
+        Get current state for the user in the room according to their membership. This
+        will be the current state at the time of their LEAVE/BAN, otherwise it will be the
+        current state <= `to_token`.
+
+        Args:
+            room_id: The room ID to fetch data for
+            room_membership_for_user_at_to_token: Membership information for the user
+                in the room at the time of `to_token`.
+            state_filter: Only fetch state events matching this filter.
+            to_token: The point in the stream to sync up to.
+        """
+        state_ids = await self.get_current_state_ids_at(
+            room_id=room_id,
+            room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+            state_filter=state_filter,
+            to_token=to_token,
+        )
+
+        events = await self.store.get_events_as_list(list(state_ids.values()))
+
+        state_map = {}
+        for event in events:
+            state_map[(event.type, event.state_key)] = event
+
+        return state_map
+
+    @trace
+    async def get_room_sync_data(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        room_id: str,
+        room_sync_config: RoomSyncConfig,
+        room_membership_for_user_at_to_token: RoomsForUserType,
+        from_token: Optional[SlidingSyncStreamToken],
+        to_token: StreamToken,
+        newly_joined: bool,
+        is_dm: bool,
+    ) -> SlidingSyncResult.RoomResult:
+        """
+        Fetch room data for the sync response.
+
+        We fetch data according to the token range (> `from_token` and <= `to_token`).
+
+        Args:
+            sync_config: Sync configuration (contains the user to fetch data for)
+            room_id: The room ID to fetch data for
+            room_sync_config: Config for what data we should fetch for a room in the
+                sync response.
+            room_membership_for_user_at_to_token: Membership information for the user
+                in the room at the time of `to_token`.
+            from_token: The point in the stream to sync from.
+            to_token: The point in the stream to sync up to.
+            newly_joined: If the user has newly joined the room
+            is_dm: Whether the room is a DM room
+        """
+        user = sync_config.user
+
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "membership",
+            room_membership_for_user_at_to_token.membership,
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "timeline_limit",
+            room_sync_config.timeline_limit,
+        )
+
+        # Handle state resets. For example, if we see
+        # `room_membership_for_user_at_to_token.event_id=None and
+        # room_membership_for_user_at_to_token.membership is not None`, we should
+        # indicate to the client that a state reset happened. Perhaps we should indicate
+        # this by setting `initial: True` and empty `required_state: []`.
+        state_reset_out_of_room = False
+        if (
+            room_membership_for_user_at_to_token.event_id is None
+            and room_membership_for_user_at_to_token.membership is not None
+        ):
+            # We only expect the `event_id` to be `None` if you've been state reset out
+            # of the room (meaning you're no longer in the room). We could put this as
+            # part of the if-statement above but we want to handle every case where
+            # `event_id` is `None`.
+            assert room_membership_for_user_at_to_token.membership is Membership.LEAVE
+
+            state_reset_out_of_room = True
+
+        prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
+
+        # Determine whether we should limit the timeline to the token range.
+        #
+        # We should return historical messages (before token range) in the
+        # following cases because we want clients to be able to show a basic
+        # screen of information:
+        #
+        #  - Initial sync (because no `from_token` to limit us anyway)
+        #  - When users `newly_joined`
+        #  - For an incremental sync where we haven't sent it down this
+        #    connection before
+        #
+        # Relevant spec issue:
+        # https://github.com/matrix-org/matrix-spec/issues/1917
+        #
+        # XXX: Odd behavior - We also check if the `timeline_limit` has increased, if so
+        # we ignore the from bound for the timeline to send down a larger chunk of
+        # history and set `unstable_expanded_timeline` to true. This is only being added
+        # to match the behavior of the Sliding Sync proxy as we expect the ElementX
+        # client to feel a certain way and be able to trickle in a full page of timeline
+        # messages to fill up the screen. This is a bit different to the behavior of the
+        # Sliding Sync proxy (which sets initial=true, but then doesn't send down the
+        # full state again), but existing apps, e.g. ElementX, just need `limited` set.
+        # We don't explicitly set `limited` but this will be the case for any room that
+        # has more history than we're trying to pull out. Using
+        # `unstable_expanded_timeline` allows us to avoid contaminating what `initial`
+        # or `limited` mean for clients that interpret them correctly. In future this
+        # behavior is almost certainly going to change.
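+        #
+        # Illustrative example (hypothetical values, not from the spec): if a
+        # client previously synced this room with `timeline_limit: 1` and now
+        # requests it with `timeline_limit: 20`, we ignore the timeline's
+        # `from_token` bound for this room and mark the response with
+        # `unstable_expanded_timeline: true`, so the client can backfill a
+        # fuller page of history in one request.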
+        #
+        from_bound = None
+        initial = True
+        ignore_timeline_bound = False
+        if from_token and not newly_joined and not state_reset_out_of_room:
+            room_status = previous_connection_state.rooms.have_sent_room(room_id)
+            if room_status.status == HaveSentRoomFlag.LIVE:
+                from_bound = from_token.stream_token.room_key
+                initial = False
+            elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
+                assert room_status.last_token is not None
+                from_bound = room_status.last_token
+                initial = False
+            elif room_status.status == HaveSentRoomFlag.NEVER:
+                from_bound = None
+                initial = True
+            else:
+                assert_never(room_status.status)
+
+            log_kv({"sliding_sync.room_status": room_status})
+
+            if prev_room_sync_config is not None:
+                # Check if the timeline limit has increased, if so ignore the
+                # timeline bound and record the change (see "XXX: Odd behavior"
+                # above).
+                if (
+                    prev_room_sync_config.timeline_limit
+                    < room_sync_config.timeline_limit
+                ):
+                    ignore_timeline_bound = True
+
+        log_kv(
+            {
+                "sliding_sync.from_bound": from_bound,
+                "sliding_sync.initial": initial,
+                "sliding_sync.ignore_timeline_bound": ignore_timeline_bound,
+            }
+        )
+
+        # Assemble the list of timeline events
+        #
+        # FIXME: It would be nice to make the `rooms` response more uniform regardless of
+        # membership. Currently, we have to make all of these optional because
+        # `invite`/`knock` rooms only have `stripped_state`. See
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+        timeline_events: List[EventBase] = []
+        bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None
+        limited: Optional[bool] = None
+        prev_batch_token: Optional[StreamToken] = None
+        num_live: Optional[int] = None
+        if (
+            room_sync_config.timeline_limit > 0
+            # No timeline for invite/knock rooms (just `stripped_state`)
+            and room_membership_for_user_at_to_token.membership
+            not in (Membership.INVITE, Membership.KNOCK)
+        ):
+            limited = False
+            # We want to start off using the `to_token` (vs `from_token`) because we look
+            # backwards from the `to_token` up to the `timeline_limit` and we might not
+            # reach the `from_token` before we hit the limit. We will update the room stream
+            # position once we've fetched the events to point to the earliest event fetched.
+            prev_batch_token = to_token
+
+            # We're going to paginate backwards from the `to_token`
+            to_bound = to_token.room_key
+            # People shouldn't see past their leave/ban event
+            if room_membership_for_user_at_to_token.membership in (
+                Membership.LEAVE,
+                Membership.BAN,
+            ):
+                to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
+
+            timeline_from_bound = from_bound
+            if ignore_timeline_bound:
+                timeline_from_bound = None
+
+            # For initial `/sync` (and other historical scenarios mentioned above), we
+            # want to view a historical section of the timeline; to fetch events by
+            # `topological_ordering` (best representation of the room DAG as others were
+            # seeing it at the time). This also aligns with the order that `/messages`
+            # returns events in.
+            #
+            # For incremental `/sync`, we want to get all updates for rooms since
+            # the last `/sync` (regardless if those updates arrived late or happened
+            # a while ago in the past); to fetch events by `stream_ordering` (in the
+            # order they were received by the server).
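+            #
+            # Illustrative example: an event created days ago on a remote server
+            # but only received by us just now is "old" by `topological_ordering`
+            # (so an initial sync won't surface it as recent) but "new" by
+            # `stream_ordering` (so an incremental sync will still send it down
+            # as an update).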
+            #
+            # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
+            #
+            # FIXME: Using workaround for mypy,
+            # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and
+            # https://github.com/python/mypy/issues/17479
+            paginate_room_events_by_topological_ordering: PaginateFunction = (
+                self.store.paginate_room_events_by_topological_ordering
+            )
+            paginate_room_events_by_stream_ordering: PaginateFunction = (
+                self.store.paginate_room_events_by_stream_ordering
+            )
+            pagination_method: PaginateFunction = (
+                # Use `topological_ordering` for historical events
+                paginate_room_events_by_topological_ordering
+                if timeline_from_bound is None
+                # Use `stream_ordering` for updates
+                else paginate_room_events_by_stream_ordering
+            )
+            timeline_events, new_room_key, limited = await pagination_method(
+                room_id=room_id,
+                # The bounds are reversed so we can paginate backwards
+                # (from newer to older events) starting at `to_bound`.
+                # This ensures we fill the `limit` with the newest events first.
+                from_key=to_bound,
+                to_key=timeline_from_bound,
+                direction=Direction.BACKWARDS,
+                limit=room_sync_config.timeline_limit,
+            )
+
+            # We want to return the events in ascending order (the last event is the
+            # most recent).
+            timeline_events.reverse()
+
+            # Make sure we don't expose any events that the client shouldn't see
+            timeline_events = await filter_events_for_client(
+                self.storage_controllers,
+                user.to_string(),
+                timeline_events,
+                is_peeking=room_membership_for_user_at_to_token.membership
+                != Membership.JOIN,
+                filter_send_to_client=True,
+            )
+            # TODO: Filter out `EventTypes.CallInvite` in public rooms,
+            # see https://github.com/element-hq/synapse/issues/17359
+
+            # TODO: Handle timeline gaps (`get_timeline_gaps()`)
+
+            # Determine how many "live" events we have (events within the given token range).
+            #
+            # This is mostly useful to determine whether a given @mention event should
+            # make a noise or not. Clients cannot rely solely on the absence of
+            # `initial: true` to determine live events because if a room not in the
+            # sliding window bumps into the window because of an @mention it will have
+            # `initial: true` yet contain a single live event (with potentially other
+            # old events in the timeline).
+            num_live = 0
+            if from_token is not None:
+                for timeline_event in reversed(timeline_events):
+                    # These fields should be present for all persisted events
+                    assert timeline_event.internal_metadata.stream_ordering is not None
+                    assert timeline_event.internal_metadata.instance_name is not None
+
+                    persisted_position = PersistedEventPosition(
+                        instance_name=timeline_event.internal_metadata.instance_name,
+                        stream=timeline_event.internal_metadata.stream_ordering,
+                    )
+                    if persisted_position.persisted_after(
+                        from_token.stream_token.room_key
+                    ):
+                        num_live += 1
+                    else:
+                        # Since we're iterating over the timeline events in
+                        # reverse-chronological order, we can break once we hit an event
+                        # that's not live. In the future, we could potentially optimize
+                        # this more with a binary search (bisect).
+                        break
+
+            # If the timeline is `limited=True`, the client does not have all events
+            # necessary to calculate aggregations themselves.
+            if limited:
+                bundled_aggregations = (
+                    await self.relations_handler.get_bundled_aggregations(
+                        timeline_events, user.to_string()
+                    )
+                )
+
+            # Update the `prev_batch_token` to point to the position that allows us to
+            # keep paginating backwards from the oldest event we return in the timeline.
+            prev_batch_token = prev_batch_token.copy_and_replace(
+                StreamKeyType.ROOM, new_room_key
+            )
+
+        # Figure out any stripped state events for invite/knocks. This allows the
+        # potential joiner to identify the room.
+        stripped_state: List[JsonDict] = []
+        if room_membership_for_user_at_to_token.membership in (
+            Membership.INVITE,
+            Membership.KNOCK,
+        ):
+            # This should never happen. If someone has been invited to or has
+            # knocked on a room, then there should be an event for it.
+            assert room_membership_for_user_at_to_token.event_id is not None
+
+            invite_or_knock_event = await self.store.get_event(
+                room_membership_for_user_at_to_token.event_id
+            )
+
+            stripped_state = []
+            if invite_or_knock_event.membership == Membership.INVITE:
+                stripped_state.extend(
+                    invite_or_knock_event.unsigned.get("invite_room_state", [])
+                )
+            elif invite_or_knock_event.membership == Membership.KNOCK:
+                stripped_state.extend(
+                    invite_or_knock_event.unsigned.get("knock_room_state", [])
+                )
+
+            stripped_state.append(strip_event(invite_or_knock_event))
+
+        # Get the changes to current state in the token range from the
+        # `current_state_delta_stream` table.
+        #
+        # For incremental syncs, we can do this first to determine if something relevant
+        # has changed and strategically avoid fetching other costly things.
+        room_state_delta_id_map: MutableStateMap[str] = {}
+        name_event_id: Optional[str] = None
+        membership_changed = False
+        name_changed = False
+        avatar_changed = False
+        if initial:
+            # Check whether the room has a name set
+            name_state_ids = await self.get_current_state_ids_at(
+                room_id=room_id,
+                room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+                state_filter=StateFilter.from_types([(EventTypes.Name, "")]),
+                to_token=to_token,
+            )
+            name_event_id = name_state_ids.get((EventTypes.Name, ""))
+        else:
+            assert from_bound is not None
+
+            # TODO: Limit the number of state events we're about to send down
+            # the room, if it's too many we should change this to an
+            # `initial=True`?
+            deltas = await self.store.get_current_state_deltas_for_room(
+                room_id=room_id,
+                from_token=from_bound,
+                to_token=to_token.room_key,
+            )
+            for delta in deltas:
+                # TODO: Handle state resets where event_id is None
+                if delta.event_id is not None:
+                    room_state_delta_id_map[(delta.event_type, delta.state_key)] = (
+                        delta.event_id
+                    )
+
+                if delta.event_type == EventTypes.Member:
+                    membership_changed = True
+                elif delta.event_type == EventTypes.Name and delta.state_key == "":
+                    name_changed = True
+                elif (
+                    delta.event_type == EventTypes.RoomAvatar and delta.state_key == ""
+                ):
+                    avatar_changed = True
+
+        # We only need the room summary for calculating heroes, however if we do
+        # fetch it then we can use it to calculate `joined_count` and
+        # `invited_count`.
+        room_membership_summary: Optional[Mapping[str, MemberSummary]] = None
+
+        # `heroes` are required if the room name is not set.
+        #
+        # Note: When you're the first one on your server to be invited to a new room
+        # over federation, we only have access to some stripped state in
+        # `event.unsigned.invite_room_state` which currently doesn't include `heroes`,
+        # see https://github.com/matrix-org/matrix-spec/issues/380. This means that
+        # clients won't be able to calculate the room name when necessary, which is
+        # just a pitfall we have to deal with until that spec issue is resolved.
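+        # (As an illustration: for an unnamed DM, the hero is typically the other
+        # user, and clients derive a display name for the room from that hero's
+        # profile.)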
+        hero_user_ids: List[str] = []
+        # TODO: Should we also check for `EventTypes.CanonicalAlias`
+        # (`m.room.canonical_alias`) as a fallback for the room name? see
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153
+        #
+        # We need to fetch the `heroes` if the room name is not set. But we only need to
+        # get them on initial syncs (or the first time we send down the room) or if the
+        # membership has changed which may change the heroes.
+        if name_event_id is None and (initial or (not initial and membership_changed)):
+            # We need the room summary to extract the heroes from
+            if room_membership_for_user_at_to_token.membership != Membership.JOIN:
+                # TODO: Figure out how to get the membership summary for left/banned rooms
+                # For invite/knock rooms we don't include the information.
+                room_membership_summary = {}
+            else:
+                room_membership_summary = await self.store.get_room_summary(room_id)
+                # TODO: Reverse/rewind back to the `to_token`
+
+            hero_user_ids = extract_heroes_from_room_summary(
+                room_membership_summary, me=user.to_string()
+            )
+
+        # Fetch the membership counts for rooms we're joined to.
+        #
+        # Similarly to other metadata, we only need to calculate the member
+        # counts if this is an initial sync or the memberships have changed.
+        joined_count: Optional[int] = None
+        invited_count: Optional[int] = None
+        if (
+            initial or membership_changed
+        ) and room_membership_for_user_at_to_token.membership == Membership.JOIN:
+            # If we have the room summary (because we calculated heroes above)
+            # then we can simply pull the counts from there.
+            if room_membership_summary is not None:
+                empty_membership_summary = MemberSummary([], 0)
+
+                joined_count = room_membership_summary.get(
+                    Membership.JOIN, empty_membership_summary
+                ).count
+
+                invited_count = room_membership_summary.get(
+                    Membership.INVITE, empty_membership_summary
+                ).count
+            else:
+                member_counts = await self.store.get_member_counts(room_id)
+                joined_count = member_counts.get(Membership.JOIN, 0)
+                invited_count = member_counts.get(Membership.INVITE, 0)
+
+        # Fetch the `required_state` for the room
+        #
+        # No `required_state` for invite/knock rooms (just `stripped_state`)
+        #
+        # FIXME: It would be nice to make the `rooms` response more uniform regardless
+        # of membership. Currently, we have to make this optional because
+        # `invite`/`knock` rooms only have `stripped_state`. See
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+        #
+        # Calculate the `StateFilter` based on the `required_state` for the room
+        required_state_filter = StateFilter.none()
+        if room_membership_for_user_at_to_token.membership not in (
+            Membership.INVITE,
+            Membership.KNOCK,
+        ):
+            # If we have a double wildcard ("*", "*") in the `required_state`, we need
+            # to fetch all state for the room
+            #
+            # Note: MSC3575 describes different behavior to how we're handling things
+            # here but since it's not wrong to return more state than requested
+            # (`required_state` is just the minimum requested), it doesn't matter if we
+            # include more than the client wanted. This complexity is also under scrutiny,
+            # see
+            # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050
+            #
+            # > One unique exception is when you request all state events via ["*", "*"]. When used,
+            # > all state events are returned by default, and additional entries FILTER OUT the returned set
+            # > of state events. These additional entries cannot use '*' themselves.
+            # > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member
+            # > event _except_ for @alice:example.com, and include every other state event.
+            # > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not
+            # > required as it would have been returned anyway.
+            # >
+            # > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575)
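+            #
+            # As a rough illustration of the shape we work with here (values are
+            # hypothetical): a request `required_state` of
+            #     [["*", "*"], ["m.room.member", "@alice:example.com"]]
+            # arrives as a `required_state_map` keyed by event type, with sets of
+            # state keys as values:
+            #     {"*": {"*"}, "m.room.member": {"@alice:example.com"}}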
+            if StateValues.WILDCARD in room_sync_config.required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                set_tag(
+                    SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard",
+                    True,
+                )
+                required_state_filter = StateFilter.all()
+            # TODO: `StateFilter` currently doesn't support wildcard event types. We're
+            # currently working around this by returning all state to the client but it
+            # would be nice to fetch less from the database and return just what the
+            # client wanted.
+            elif (
+                room_sync_config.required_state_map.get(StateValues.WILDCARD)
+                is not None
+            ):
+                set_tag(
+                    SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard_event_type",
+                    True,
+                )
+                required_state_filter = StateFilter.all()
+            else:
+                required_state_types: List[Tuple[str, Optional[str]]] = []
+                num_wild_state_keys = 0
+                lazy_load_room_members = False
+                num_others = 0
+                for (
+                    state_type,
+                    state_key_set,
+                ) in room_sync_config.required_state_map.items():
+                    for state_key in state_key_set:
+                        if state_key == StateValues.WILDCARD:
+                            num_wild_state_keys += 1
+                            # `None` is a wildcard in the `StateFilter`
+                            required_state_types.append((state_type, None))
+                        # We need to fetch all relevant people when we're lazy-loading membership
+                        elif (
+                            state_type == EventTypes.Member
+                            and state_key == StateValues.LAZY
+                        ):
+                            lazy_load_room_members = True
+                            # Everyone in the timeline is relevant
+                            timeline_membership: Set[str] = set()
+                            if timeline_events is not None:
+                                for timeline_event in timeline_events:
+                                    timeline_membership.add(timeline_event.sender)
+
+                            for user_id in timeline_membership:
+                                required_state_types.append(
+                                    (EventTypes.Member, user_id)
+                                )
+
+                            # FIXME: We probably also care about invite, ban, kick, targets, etc
+                            # but the spec only mentions "senders".
+                        elif state_key == StateValues.ME:
+                            num_others += 1
+                            required_state_types.append((state_type, user.to_string()))
+                        else:
+                            num_others += 1
+                            required_state_types.append((state_type, state_key))
+
+                set_tag(
+                    SynapseTags.FUNC_ARG_PREFIX
+                    + "required_state_wildcard_state_key_count",
+                    num_wild_state_keys,
+                )
+                set_tag(
+                    SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy",
+                    lazy_load_room_members,
+                )
+                set_tag(
+                    SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count",
+                    num_others,
+                )
+
+                required_state_filter = StateFilter.from_types(required_state_types)
+
+        # We need this base set of info for the response so let's just fetch it along
+        # with the `required_state` for the room
+        hero_room_state = [
+            (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids
+        ]
+        meta_room_state = list(hero_room_state)
+        if initial or name_changed:
+            meta_room_state.append((EventTypes.Name, ""))
+        if initial or avatar_changed:
+            meta_room_state.append((EventTypes.RoomAvatar, ""))
+
+        state_filter = StateFilter.all()
+        if required_state_filter != StateFilter.all():
+            state_filter = StateFilter(
+                types=StateFilter.from_types(
+                    chain(meta_room_state, required_state_filter.to_types())
+                ).types,
+                include_others=required_state_filter.include_others,
+            )
+
+        # The required state map to store in the room sync config, if it has
+        # changed.
+        changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None
+
+        # We can return all of the state that was requested if this was the first
+        # time we've sent the room down this connection.
+        room_state: StateMap[EventBase] = {}
+        if initial:
+            room_state = await self.get_current_state_at(
+                room_id=room_id,
+                room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+                state_filter=state_filter,
+                to_token=to_token,
+            )
+        else:
+            assert from_bound is not None
+
+            if prev_room_sync_config is not None:
+                # Check if there are any changes to the required state config
+                # that we need to handle.
+                changed_required_state_map, added_state_filter = (
+                    _required_state_changes(
+                        user.to_string(),
+                        previous_room_config=prev_room_sync_config,
+                        room_sync_config=room_sync_config,
+                        state_deltas=room_state_delta_id_map,
+                    )
+                )
+
+                if added_state_filter:
+                    # Some state entries got added, so we pull out the current
+                    # state for them. If we don't do this we'd only send down new deltas.
+                    state_ids = await self.get_current_state_ids_at(
+                        room_id=room_id,
+                        room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+                        state_filter=added_state_filter,
+                        to_token=to_token,
+                    )
+                    room_state_delta_id_map.update(state_ids)
+
+            events = await self.store.get_events(
+                state_filter.filter_state(room_state_delta_id_map).values()
+            )
+            room_state = {(s.type, s.state_key): s for s in events.values()}
+
+            # If the membership changed and we have to get heroes, get the remaining
+            # heroes from the state
+            if hero_user_ids:
+                hero_membership_state = await self.get_current_state_at(
+                    room_id=room_id,
+                    room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+                    state_filter=StateFilter.from_types(hero_room_state),
+                    to_token=to_token,
+                )
+                room_state.update(hero_membership_state)
+
+        required_room_state: StateMap[EventBase] = {}
+        if required_state_filter != StateFilter.none():
+            required_room_state = required_state_filter.filter_state(room_state)
+
+        # Find the room name and avatar from the state
+        room_name: Optional[str] = None
+        # TODO: Should we also check for `EventTypes.CanonicalAlias`
+        # (`m.room.canonical_alias`) as a fallback for the room name? see
+        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153
+        name_event = room_state.get((EventTypes.Name, ""))
+        if name_event is not None:
+            room_name = name_event.content.get("name")
+
+        room_avatar: Optional[str] = None
+        avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
+        if avatar_event is not None:
+            room_avatar = avatar_event.content.get("url")
+
+        # Assemble heroes: extract the info from the state we just fetched
+        heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = []
+        for hero_user_id in hero_user_ids:
+            member_event = room_state.get((EventTypes.Member, hero_user_id))
+            if member_event is not None:
+                heroes.append(
+                    SlidingSyncResult.RoomResult.StrippedHero(
+                        user_id=hero_user_id,
+                        display_name=member_event.content.get("displayname"),
+                        avatar_url=member_event.content.get("avatar_url"),
+                    )
+                )
+
+        # Figure out the last bump event in the room. If the bump stamp hasn't
+        # changed we omit it from the response.
+        bump_stamp = None
+
+        always_return_bump_stamp = (
+            # We use the membership event position for any non-join
+            room_membership_for_user_at_to_token.membership != Membership.JOIN
+            # We didn't fetch any timeline events but we should still check for
+            # a bump_stamp that might be somewhere
+            or limited is None
+            # There might be a bump event somewhere before the timeline events
+            # that we fetched, that we didn't previously send down
+            or limited is True
+            # Always give the client some frame of reference if this is the
+            # first time they are seeing the room down the connection
+            or initial
+        )
+
+        # If we're joined to the room, we need to find the last bump event before the
+        # `to_token`
+        if room_membership_for_user_at_to_token.membership == Membership.JOIN:
+            # Try and get a bump stamp
+            new_bump_stamp = await self._get_bump_stamp(
+                room_id,
+                to_token,
+                timeline_events,
+                check_outside_timeline=always_return_bump_stamp,
+            )
+            if new_bump_stamp is not None:
+                bump_stamp = new_bump_stamp
+
+        if bump_stamp is None and always_return_bump_stamp:
+            # By default, just choose the membership event position for any non-join membership
+            bump_stamp = room_membership_for_user_at_to_token.event_pos.stream
+
+        if bump_stamp is not None and bump_stamp < 0:
+            # We never want to send down negative stream orderings, as you can't
+            # sensibly compare positive and negative stream orderings (they have
+            # different meanings).
+            #
+            # A negative bump stamp here can only happen if the stream ordering
+            # of the membership event is negative (and there are no further bump
+            # stamps), which can happen if the server leaves and deletes a room,
+            # and then rejoins it.
+            #
+            # To deal with this, we just set the bump stamp to zero, which will
+            # shove this room to the bottom of the list. This is OK as the
+            # moment a new message happens in the room it will get put into a
+            # sensible order again.
+            bump_stamp = 0
+
+        room_sync_required_state_map_to_persist = room_sync_config.required_state_map
+        if changed_required_state_map:
+            room_sync_required_state_map_to_persist = changed_required_state_map
+
+        # Record the `room_sync_config` if we're `ignore_timeline_bound` (which means
+        # that the `timeline_limit` has increased)
+        unstable_expanded_timeline = False
+        if ignore_timeline_bound:
+            # FIXME: We signal the fact that we're sending down more events to
+            # the client by setting `unstable_expanded_timeline` to true (see
+            # "XXX: Odd behavior" above).
+            unstable_expanded_timeline = True
+
+            new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=room_sync_config.timeline_limit,
+                required_state_map=room_sync_required_state_map_to_persist,
+            )
+        elif prev_room_sync_config is not None:
+            # If the result is `limited` then we need to record that the
+            # `timeline_limit` has been reduced, as when/if the client later requests
+            # more timeline then we have more data to send.
+            #
+            # Otherwise (when not `limited`) we don't need to record that the
+            # `timeline_limit` has been reduced, as the *effective* `timeline_limit`
+            # (i.e. the amount of timeline we have previously sent to the client) is at
+            # least the previous `timeline_limit`.
+            #
+            # This is to handle the case where the `timeline_limit` e.g. goes from 10 to
+            # 5 to 10 again (without any timeline gaps), where there's no point sending
+            # down the initial historical chunk events when the `timeline_limit` is
+            # increased as the client already has the 10 previous events. However, if
+            # the client has a gap in the timeline (i.e. `limited` is True), then we *do*
+            # need to record the reduced timeline.
+            #
+            # TODO: Handle timeline gaps (`get_timeline_gaps()`) - This is separate from
+            # the gaps the client might see because a response was `limited`, which is
+            # what we're talking about above.
+            if (
+                limited
+                and prev_room_sync_config.timeline_limit
+                > room_sync_config.timeline_limit
+            ):
+                new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                    timeline_limit=room_sync_config.timeline_limit,
+                    required_state_map=room_sync_required_state_map_to_persist,
+                )
+
+            elif changed_required_state_map is not None:
+                new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                    timeline_limit=room_sync_config.timeline_limit,
+                    required_state_map=room_sync_required_state_map_to_persist,
+                )
+
+        else:
+            new_connection_state.room_configs[room_id] = room_sync_config
+
+        set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
+
+        return SlidingSyncResult.RoomResult(
+            name=room_name,
+            avatar=room_avatar,
+            heroes=heroes,
+            is_dm=is_dm,
+            initial=initial,
+            required_state=list(required_room_state.values()),
+            timeline_events=timeline_events,
+            bundled_aggregations=bundled_aggregations,
+            stripped_state=stripped_state,
+            prev_batch=prev_batch_token,
+            limited=limited,
+            unstable_expanded_timeline=unstable_expanded_timeline,
+            num_live=num_live,
+            bump_stamp=bump_stamp,
+            joined_count=joined_count,
+            invited_count=invited_count,
+            # TODO: These are just dummy values. We could potentially just remove these
+            # since notifications can only really be done correctly on the client anyway
+            # (encrypted rooms).
+            notification_count=0,
+            highlight_count=0,
+        )
+
+    @trace
+    async def _get_bump_stamp(
+        self,
+        room_id: str,
+        to_token: StreamToken,
+        timeline: List[EventBase],
+        check_outside_timeline: bool,
+    ) -> Optional[int]:
+        """Get a bump stamp for the room, if we have a bump event and it has
+        changed.
+
+        Args:
+            room_id
+            to_token: The upper bound of token to return
+            timeline: The list of events we have fetched.
+            check_outside_timeline: Whether we need to check for bump stamp for
+                events before the timeline if we didn't find a bump stamp in
+                the timeline events.
+        """
+
+        # First check the timeline events we're returning to see if one of
+        # those matches. We iterate backwards and take the stream ordering
+        # of the first event that matches the bump event types.
+        for timeline_event in reversed(timeline):
+            if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES:
+                new_bump_stamp = timeline_event.internal_metadata.stream_ordering
+
+                # All persisted events have a stream ordering
+                assert new_bump_stamp is not None
+
+                # If we've just joined a remote room, then the last bump event may
+                # have been backfilled (and so have a negative stream ordering).
+                # These negative stream orderings can't sensibly be compared, so
+                # instead we use the membership event position.
+                if new_bump_stamp > 0:
+                    return new_bump_stamp
+
+        if not check_outside_timeline:
+            # If we don't need to look outside the timeline (e.g. the sync wasn't
+            # limited), then we know the bump stamp can't have changed.
+            return None
+
+        # We can quickly query for the latest bump event in the room using the
+        # sliding sync tables.
+        latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room(
+            room_id
+        )
+
+        min_to_token_position = to_token.room_key.stream
+
+        # If we can rely on the new sliding sync tables and the `bump_stamp` is
+        # `None`, just fall back to the membership event position. This can happen
+        # when we've just joined a remote room and all the events are backfilled.
+        if (
+            # FIXME: The background job check can be removed once we bump
+            # `SCHEMA_COMPAT_VERSION` and run the foreground update for
+            # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots`
+            # (tracked by https://github.com/element-hq/synapse/issues/17623)
+            latest_room_bump_stamp is None
+            and await self.store.have_finished_sliding_sync_background_jobs()
+        ):
+            return None
+
+        # The `bump_stamp` stored in the database might be ahead of our token. Since
+        # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure
+        # that it's before the `to_token` in all scenarios. The only scenario we can be
+        # sure of is if the `bump_stamp` is totally before the minimum position from
+        # the token.
+        #
+        # We don't need to check if the background update has finished, as if the
+        # returned bump stamp is not None then it must be up to date.
+        elif (
+            latest_room_bump_stamp is not None
+            and latest_room_bump_stamp < min_to_token_position
+        ):
+            if latest_room_bump_stamp > 0:
+                return latest_room_bump_stamp
+            else:
+                return None
+
+        # Otherwise, if it's within or after the `to_token`, we need to find the
+        # last bump event before the `to_token`.
+        else:
+            last_bump_event_result = (
+                await self.store.get_last_event_pos_in_room_before_stream_ordering(
+                    room_id,
+                    to_token.room_key,
+                    event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES,
+                )
+            )
+            if last_bump_event_result is not None:
+                _, new_bump_event_pos = last_bump_event_result
+
+                # If we've just joined a remote room, then the last bump event may
+                # have been backfilled (and so have a negative stream ordering).
+                # These negative stream orderings can't sensibly be compared, so
+                # instead we use the membership event position.
+                if new_bump_event_pos.stream > 0:
+                    return new_bump_event_pos.stream
+
+            return None
+
+
+def _required_state_changes(
+    user_id: str,
+    *,
+    previous_room_config: "RoomSyncConfig",
+    room_sync_config: RoomSyncConfig,
+    state_deltas: StateMap[str],
+) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
+    """Calculates the changes between the required state room config from the
+    previous requests compared with the current request.
+
+    This does two things. First, it calculates if we need to update the room
+    config due to changes to required state. Secondly, it works out which state
+    entries we need to pull from current state and return due to the state entry
+    now appearing in the required state when it previously wasn't (on top of the
+    state deltas).
+
+    This function tries to handle the case where a state entry is
+    added, removed and then added again to the required state. In that case we
+    only want to re-send that entry down sync if it has changed.
+
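+    As an illustrative example (hypothetical event types): if the previous
+    request asked for `{"m.room.topic": {""}}` and the current request asks for
+    `{"m.room.topic": {""}, "m.room.pinned_events": {""}}`, the updated config
+    is the requested map and the returned `StateFilter` includes
+    `("m.room.pinned_events", "")`, so the newly requested entry is sent down
+    from current state once.
+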
+    Returns:
+        A 2-tuple of updated required state config (or None if there is no update)
+        and the state filter to use to fetch extra current state that we need to
+        return.
+    """
+
+    prev_required_state_map = previous_room_config.required_state_map
+    request_required_state_map = room_sync_config.required_state_map
+
+    if prev_required_state_map == request_required_state_map:
+        # There has been no change. Return immediately.
+        return None, StateFilter.none()
+
+    prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
+    request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
+
+    # If we were previously fetching everything ("*", "*"), always update the effective
+    # room required state config to match the request. And since we were previously
+    # already fetching everything, we don't have to fetch anything now that they've
+    # narrowed.
+    if StateValues.WILDCARD in prev_wildcard:
+        return request_required_state_map, StateFilter.none()
+
+    # If an event type wildcard has been added or removed we don't try and do
+    # anything fancy, and instead always update the effective room required
+    # state config to match the request.
+    if request_wildcard - prev_wildcard:
+        # Some keys were added, so we need to fetch everything
+        return request_required_state_map, StateFilter.all()
+    if prev_wildcard - request_wildcard:
+        # Keys were only removed, so we don't have to fetch everything.
+        return request_required_state_map, StateFilter.none()
+
+    # Contains updates to the required state map compared with the previous room
+    # config. This has the same format as `RoomSyncConfig.required_state_map`.
+    changes: Dict[str, AbstractSet[str]] = {}
+
+    # The set of types/state keys that we need to fetch and return to the
+    # client. Passed to `StateFilter.from_types(...)`
+    added: List[Tuple[str, Optional[str]]] = []
+
+    # First we calculate what, if anything, has been *added*.
+    for event_type in (
+        prev_required_state_map.keys() | request_required_state_map.keys()
+    ):
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change to this type
+            continue
+
+        if not request_state_keys - old_state_keys:
+            # Nothing *added*, so we skip. Removals happen below.
+            continue
+
+        # Always update changes to include the newly added keys
+        changes[event_type] = request_state_keys
+
+        if StateValues.WILDCARD in old_state_keys:
+            # We were previously fetching everything for this type, so we don't need to
+            # fetch anything new.
+            continue
+
+        # Record the new state keys to fetch for this type.
+        if StateValues.WILDCARD in request_state_keys:
+            # If we have added a wildcard then we always just fetch everything.
+            added.append((event_type, None))
+        else:
+            for state_key in request_state_keys - old_state_keys:
+                if state_key == StateValues.ME:
+                    added.append((event_type, user_id))
+                elif state_key == StateValues.LAZY:
+                    # We handle lazy loading separately (outside this function),
+                    # so don't need to explicitly add anything here.
+                    #
+                    # LAZY values should also be ignored for event types other
+                    # than membership.
+                    pass
+                else:
+                    added.append((event_type, state_key))
+
+    added_state_filter = StateFilter.from_types(added)
+
+    # Convert the list of state deltas to map from type to state_keys that have
+    # changed.
+    changed_types_to_state_keys: Dict[str, Set[str]] = {}
+    for event_type, state_key in state_deltas:
+        changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
+
+    # Figure out what changes we need to apply to the effective required state
+    # config.
+    for event_type, changed_state_keys in changed_types_to_state_keys.items():
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change.
+            continue
+
+        if request_state_keys - old_state_keys:
+            # We've expanded the set of state keys, so we just clobber the
+            # current set with the new set.
+            #
+            # We could also keep entries where the state hasn't changed but is
+            # no longer in the requested required state. However, that's enough
+            # of an edge case that we can ignore it (it's only a performance
+            # optimization).
+            changes[event_type] = request_state_keys
+            continue
+
+        old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
+        request_state_key_wildcard = StateValues.WILDCARD in request_state_keys
+
+        if old_state_key_wildcard != request_state_key_wildcard:
+            # If a state_key wildcard has been added or removed, we always update the
+            # effective room required state config to match the request.
+            changes[event_type] = request_state_keys
+            continue
+
+        if event_type == EventTypes.Member:
+            old_state_key_lazy = StateValues.LAZY in old_state_keys
+            request_state_key_lazy = StateValues.LAZY in request_state_keys
+
+            if old_state_key_lazy != request_state_key_lazy:
+                # If a "$LAZY" has been added or removed we always update the effective room
+                # required state config to match the request.
+                changes[event_type] = request_state_keys
+                continue
+
+        # Handle "$ME" values by adding "$ME" if the state key matches the user
+        # ID.
+        if user_id in changed_state_keys:
+            changed_state_keys.add(StateValues.ME)
+
+        # At this point there are no wildcards and no additions to the set of
+        # state keys requested, only deletions.
+        #
+        # We only remove state keys from the effective state if they've been
+        # removed from the request *and* the state has changed. This ensures
+        # that if a client removes and then re-adds a state key, we only send
+        # down the associated current state event if it's changed (rather than
+        # sending down the same event twice).
+        invalidated = (old_state_keys - request_state_keys) & changed_state_keys
+        if invalidated:
+            changes[event_type] = old_state_keys - invalidated
+
+    if changes:
+        # Update the required state config based on the changes.
+        new_required_state_map = dict(prev_required_state_map)
+        for event_type, state_keys in changes.items():
+            if state_keys:
+                new_required_state_map[event_type] = state_keys
+            else:
+                # Remove entries with empty state keys.
+                new_required_state_map.pop(event_type, None)
+
+        return new_required_state_map, added_state_filter
+    else:
+        return None, added_state_filter
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py
new file mode 100644
index 0000000000..0c77b52513
--- /dev/null
+++ b/synapse/handlers/sliding_sync/extensions.py
@@ -0,0 +1,862 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import itertools
+import logging
+from typing import (
+    TYPE_CHECKING,
+    AbstractSet,
+    ChainMap,
+    Dict,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Set,
+    cast,
+)
+
+from typing_extensions import assert_never
+
+from synapse.api.constants import AccountDataTypes, EduTypes
+from synapse.handlers.receipts import ReceiptEventSource
+from synapse.logging.opentracing import trace
+from synapse.storage.databases.main.receipts import ReceiptInRoom
+from synapse.types import (
+    DeviceListUpdates,
+    JsonMapping,
+    MultiWriterStreamToken,
+    SlidingSyncStreamToken,
+    StrCollection,
+    StreamToken,
+)
+from synapse.types.handlers.sliding_sync import (
+    HaveSentRoomFlag,
+    MutablePerConnectionState,
+    OperationType,
+    PerConnectionState,
+    SlidingSyncConfig,
+    SlidingSyncResult,
+)
+from synapse.util.async_helpers import concurrently_execute
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncExtensionHandler:
+    """Handles the extensions to sliding sync."""
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.device_handler = hs.get_device_handler()
+        self.push_rules_handler = hs.get_push_rules_handler()
+
+    @trace
+    async def get_extensions_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> SlidingSyncResult.Extensions:
+        """Handle extension requests.
+
+        Args:
+            sync_config: Sync configuration
+            previous_connection_state: The per-connection state from the previous
+                request.
+            new_connection_state: A mutable copy of the per-connection state, used
+                to record updates to the state during this request.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+
+        if sync_config.extensions is None:
+            return SlidingSyncResult.Extensions()
+
+        to_device_response = None
+        if sync_config.extensions.to_device is not None:
+            to_device_response = await self.get_to_device_extension_response(
+                sync_config=sync_config,
+                to_device_request=sync_config.extensions.to_device,
+                to_token=to_token,
+            )
+
+        e2ee_response = None
+        if sync_config.extensions.e2ee is not None:
+            e2ee_response = await self.get_e2ee_extension_response(
+                sync_config=sync_config,
+                e2ee_request=sync_config.extensions.e2ee,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        account_data_response = None
+        if sync_config.extensions.account_data is not None:
+            account_data_response = await self.get_account_data_extension_response(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                account_data_request=sync_config.extensions.account_data,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        receipts_response = None
+        if sync_config.extensions.receipts is not None:
+            receipts_response = await self.get_receipts_extension_response(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                actual_room_response_map=actual_room_response_map,
+                receipts_request=sync_config.extensions.receipts,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        typing_response = None
+        if sync_config.extensions.typing is not None:
+            typing_response = await self.get_typing_extension_response(
+                sync_config=sync_config,
+                actual_lists=actual_lists,
+                actual_room_ids=actual_room_ids,
+                actual_room_response_map=actual_room_response_map,
+                typing_request=sync_config.extensions.typing,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+        return SlidingSyncResult.Extensions(
+            to_device=to_device_response,
+            e2ee=e2ee_response,
+            account_data=account_data_response,
+            receipts=receipts_response,
+            typing=typing_response,
+        )
+
+    def find_relevant_room_ids_for_extension(
+        self,
+        requested_lists: Optional[StrCollection],
+        requested_room_ids: Optional[StrCollection],
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: AbstractSet[str],
+    ) -> Set[str]:
+        """
+        Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only
+        return results for rooms in the Sliding Sync response. This matches up the
+        requested rooms/lists with the actual lists/rooms in the Sliding Sync response.
+
+        {"lists": []}                    // Do not process any lists.
+        {"lists": ["rooms", "dms"]}      // Process only a subset of lists.
+        {"lists": ["*"]}                 // Process all lists defined in the Sliding Window API. (This is the default.)
+
+        {"rooms": []}                    // Do not process any specific rooms.
+        {"rooms": ["!a:b", "!c:d"]}      // Process only a subset of room subscriptions.
+        {"rooms": ["*"]}                 // Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+
+        Args:
+            requested_lists: The `lists` from the extension request.
+            requested_room_ids: The `rooms` from the extension request.
+            actual_lists: The actual lists from the Sliding Sync response.
+            actual_room_ids: The actual room subscriptions from the Sliding Sync request.
+        """
+
+        # We only want to include results for rooms that are already in the sliding
+        # sync response AND that were requested in the extension request.
+        relevant_room_ids: Set[str] = set()
+
+        # See what rooms from the room subscriptions we should get account data for
+        if requested_room_ids is not None:
+            for room_id in requested_room_ids:
+                # A wildcard means we process all rooms from the room subscriptions
+                if room_id == "*":
+                    relevant_room_ids.update(actual_room_ids)
+                    break
+
+                if room_id in actual_room_ids:
+                    relevant_room_ids.add(room_id)
+
+        # See what rooms from the sliding window lists we should get account data for
+        if requested_lists is not None:
+            for list_key in requested_lists:
+                # Just a type annotation because we share the variable name in multiple places
+                actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None
+
+                # A wildcard means we process rooms from all lists
+                if list_key == "*":
+                    for actual_list in actual_lists.values():
+                        # We only expect a single SYNC operation for any list
+                        assert len(actual_list.ops) == 1
+                        sync_op = actual_list.ops[0]
+                        assert sync_op.op == OperationType.SYNC
+
+                        relevant_room_ids.update(sync_op.room_ids)
+
+                    break
+
+                actual_list = actual_lists.get(list_key)
+                if actual_list is not None:
+                    # We only expect a single SYNC operation for any list
+                    assert len(actual_list.ops) == 1
+                    sync_op = actual_list.ops[0]
+                    assert sync_op.op == OperationType.SYNC
+
+                    relevant_room_ids.update(sync_op.room_ids)
+
+        return relevant_room_ids
+
+    @trace
+    async def get_to_device_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension,
+        to_token: StreamToken,
+    ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]:
+        """Handle to-device extension (MSC3885)
+
+        Args:
+            sync_config: Sync configuration
+            to_device_request: The to-device extension from the request
+            to_token: The point in the stream to sync up to.
+        """
+        user_id = sync_config.user.to_string()
+        device_id = sync_config.requester.device_id
+
+        # Skip if the extension is not enabled
+        if not to_device_request.enabled:
+            return None
+
+        # Check that this request has a valid device ID (not all requests have
+        # to belong to a device, and so device_id is None)
+        if device_id is None:
+            return SlidingSyncResult.Extensions.ToDeviceExtension(
+                next_batch=f"{to_token.to_device_key}",
+                events=[],
+            )
+
+        since_stream_id = 0
+        if to_device_request.since is not None:
+            # We've already validated this is an int.
+            since_stream_id = int(to_device_request.since)
+
+            if to_token.to_device_key < since_stream_id:
+                # The since token is ahead of our current token, so we return an
+                # empty response.
+                logger.warning(
+                    "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r",
+                    since_stream_id,
+                    to_token.to_device_key,
+                )
+                return SlidingSyncResult.Extensions.ToDeviceExtension(
+                    next_batch=to_device_request.since,
+                    events=[],
+                )
+
+            # Delete everything before the given since token, as we know the
+            # device must have received them.
+            deleted = await self.store.delete_messages_for_device(
+                user_id=user_id,
+                device_id=device_id,
+                up_to_stream_id=since_stream_id,
+            )
+
+            logger.debug(
+                "Deleted %d to-device messages up to %d for %s",
+                deleted,
+                since_stream_id,
+                user_id,
+            )
+
+        messages, stream_id = await self.store.get_messages_for_device(
+            user_id=user_id,
+            device_id=device_id,
+            from_stream_id=since_stream_id,
+            to_stream_id=to_token.to_device_key,
+            limit=min(to_device_request.limit, 100),  # Limit to at most 100 events
+        )
+
+        return SlidingSyncResult.Extensions.ToDeviceExtension(
+            next_batch=f"{stream_id}",
+            events=messages,
+        )
+
+    @trace
+    async def get_e2ee_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]:
+        """Handle E2EE device extension (MSC3884)
+
+        Args:
+            sync_config: Sync configuration
+            e2ee_request: The e2ee extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        user_id = sync_config.user.to_string()
+        device_id = sync_config.requester.device_id
+
+        # Skip if the extension is not enabled
+        if not e2ee_request.enabled:
+            return None
+
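+        # Device list updates are only computed for incremental syncs (i.e. when we
+        # have a `from_token`).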
+        device_list_updates: Optional[DeviceListUpdates] = None
+        if from_token is not None:
+            # TODO: This should take into account the `from_token` and `to_token`
+            device_list_updates = await self.device_handler.get_user_ids_changed(
+                user_id=user_id,
+                from_token=from_token.stream_token,
+            )
+
+        device_one_time_keys_count: Mapping[str, int] = {}
+        device_unused_fallback_key_types: Sequence[str] = []
+        if device_id:
+            # TODO: We should have a way to let clients differentiate between the states of:
+            #   * no change in OTK count since the provided since token
+            #   * the server has zero OTKs left for this device
+            #  Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+            device_one_time_keys_count = await self.store.count_e2e_one_time_keys(
+                user_id, device_id
+            )
+            device_unused_fallback_key_types = (
+                await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
+            )
+
+        return SlidingSyncResult.Extensions.E2eeExtension(
+            device_list_updates=device_list_updates,
+            device_one_time_keys_count=device_one_time_keys_count,
+            device_unused_fallback_key_types=device_unused_fallback_key_types,
+        )
+
+    @trace
+    async def get_account_data_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]:
+        """Handle Account Data extension (MSC3959)
+
+        Args:
+            sync_config: Sync configuration
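+            previous_connection_state: The per-connection state from the previous request.
+            new_connection_state: A mutable copy of the per-connection state, used to
+                record updates to the state during this request.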
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            account_data_request: The account_data extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        user_id = sync_config.user.to_string()
+
+        # Skip if the extension is not enabled
+        if not account_data_request.enabled:
+            return None
+
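+        # Fetch global account data: only the changes since the `from_token` on an
+        # incremental sync, otherwise the full set of global account data.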
+        global_account_data_map: Mapping[str, JsonMapping] = {}
+        if from_token is not None:
+            # TODO: This should take into account the `from_token` and `to_token`
+            global_account_data_map = (
+                await self.store.get_updated_global_account_data_for_user(
+                    user_id, from_token.stream_token.account_data_key
+                )
+            )
+
+            # TODO: This should take into account the `from_token` and `to_token`
+            have_push_rules_changed = await self.store.have_push_rules_changed_for_user(
+                user_id, from_token.stream_token.push_rules_key
+            )
+            if have_push_rules_changed:
+                # TODO: This should take into account the `from_token` and `to_token`
+                global_account_data_map[
+                    AccountDataTypes.PUSH_RULES
+                ] = await self.push_rules_handler.push_rules_for_user(sync_config.user)
+        else:
+            # TODO: This should take into account the `to_token`
+            immutable_global_account_data_map = (
+                await self.store.get_global_account_data_for_user(user_id)
+            )
+
+            # Use a `ChainMap` to avoid copying the immutable data from the cache
+            global_account_data_map = ChainMap(
+                {
+                    # TODO: This should take into account the `to_token`
+                    AccountDataTypes.PUSH_RULES: await self.push_rules_handler.push_rules_for_user(
+                        sync_config.user
+                    )
+                },
+                # Cast is safe because `ChainMap` only mutates the top-most map,
+                # see https://github.com/python/typeshed/issues/8430
+                cast(
+                    MutableMapping[str, JsonMapping], immutable_global_account_data_map
+                ),
+            )
+
+        # Fetch room account data
+        #
+        account_data_by_room_map: MutableMapping[str, Mapping[str, JsonMapping]] = {}
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=account_data_request.lists,
+            requested_room_ids=account_data_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+        if len(relevant_room_ids) > 0:
+            # We need to handle the different cases depending on whether we have
+            # sent down account data previously, so we split the relevant
+            # rooms up into different collections based on status.
+            live_rooms = set()
+            previously_rooms: Dict[str, int] = {}
+            initial_rooms = set()
+
+            for room_id in relevant_room_ids:
+                if not from_token:
+                    initial_rooms.add(room_id)
+                    continue
+
+                room_status = previous_connection_state.account_data.have_sent_room(
+                    room_id
+                )
+                if room_status.status == HaveSentRoomFlag.LIVE:
+                    live_rooms.add(room_id)
+                elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
+                    assert room_status.last_token is not None
+                    previously_rooms[room_id] = room_status.last_token
+                elif room_status.status == HaveSentRoomFlag.NEVER:
+                    initial_rooms.add(room_id)
+                else:
+                    assert_never(room_status.status)
+
+            # We fetch all room account data since the from_token. This is so
+            # that we can record which rooms have updates that haven't been sent
+            # down.
+            #
+            # Mapping from room_id to mapping of `type` to `content` of room account
+            # data events.
+            all_updates_since_the_from_token: Mapping[
+                str, Mapping[str, JsonMapping]
+            ] = {}
+            if from_token is not None:
+                # TODO: This should take into account the `from_token` and `to_token`
+                all_updates_since_the_from_token = (
+                    await self.store.get_updated_room_account_data_for_user(
+                        user_id, from_token.stream_token.account_data_key
+                    )
+                )
+
+                # Add room tags
+                #
+                # TODO: This should take into account the `from_token` and `to_token`
+                tags_by_room = await self.store.get_updated_tags(
+                    user_id, from_token.stream_token.account_data_key
+                )
+                for room_id, tags in tags_by_room.items():
+                    all_updates_since_the_from_token.setdefault(room_id, {})[
+                        AccountDataTypes.TAG
+                    ] = {"tags": tags}
+
+            # For live rooms we just get the updates from `all_updates_since_the_from_token`
+            if live_rooms:
+                for room_id in all_updates_since_the_from_token.keys() & live_rooms:
+                    account_data_by_room_map[room_id] = (
+                        all_updates_since_the_from_token[room_id]
+                    )
+
+            # For previously and initial rooms we query each room individually.
+            if previously_rooms or initial_rooms:
+
+                async def handle_previously(room_id: str) -> None:
+                    # Either get updates or all account data in the room
+                    # depending on whether the room state is PREVIOUSLY or NEVER.
+                    previous_token = previously_rooms.get(room_id)
+                    if previous_token is not None:
+                        room_account_data = await (
+                            self.store.get_updated_room_account_data_for_user_for_room(
+                                user_id=user_id,
+                                room_id=room_id,
+                                from_stream_id=previous_token,
+                                to_stream_id=to_token.account_data_key,
+                            )
+                        )
+
+                        # Add room tags
+                        changed = await self.store.has_tags_changed_for_room(
+                            user_id=user_id,
+                            room_id=room_id,
+                            from_stream_id=previous_token,
+                            to_stream_id=to_token.account_data_key,
+                        )
+                        if changed:
+                            # XXX: Ideally, this should take into account the `to_token`
+                            # and return the set of tags at that time but we don't track
+                            # changes to tags so we just have to return all tags for the
+                            # room.
+                            immutable_tag_map = await self.store.get_tags_for_room(
+                                user_id, room_id
+                            )
+                            room_account_data[AccountDataTypes.TAG] = {
+                                "tags": immutable_tag_map
+                            }
+
+                        # Only add an entry if there were any updates.
+                        if room_account_data:
+                            account_data_by_room_map[room_id] = room_account_data
+                    else:
+                        # TODO: This should take into account the `to_token`
+                        immutable_room_account_data = (
+                            await self.store.get_account_data_for_room(user_id, room_id)
+                        )
+
+                        # Add room tags
+                        #
+                        # XXX: Ideally, this should take into account the `to_token`
+                        # and return the set of tags at that time but we don't track
+                        # changes to tags so we just have to return all tags for the
+                        # room.
+                        immutable_tag_map = await self.store.get_tags_for_room(
+                            user_id, room_id
+                        )
+
+                        account_data_by_room_map[room_id] = ChainMap(
+                            {AccountDataTypes.TAG: {"tags": immutable_tag_map}}
+                            if immutable_tag_map
+                            else {},
+                            # Cast is safe because `ChainMap` only mutates the top-most map,
+                            # see https://github.com/python/typeshed/issues/8430
+                            cast(
+                                MutableMapping[str, JsonMapping],
+                                immutable_room_account_data,
+                            ),
+                        )
+
+                # We handle these rooms concurrently to speed it up.
+                await concurrently_execute(
+                    handle_previously,
+                    previously_rooms.keys() | initial_rooms,
+                    limit=20,
+                )
+
+            # Now record which rooms are up to date, and which rooms have
+            # pending updates to send.
+            new_connection_state.account_data.record_sent_rooms(previously_rooms.keys())
+            new_connection_state.account_data.record_sent_rooms(initial_rooms)
+            missing_updates = (
+                all_updates_since_the_from_token.keys() - relevant_room_ids
+            )
+            if missing_updates:
+                # If we have missing updates then we must have had a from_token.
+                assert from_token is not None
+
+                new_connection_state.account_data.record_unsent_rooms(
+                    missing_updates, from_token.stream_token.account_data_key
+                )
+
+        return SlidingSyncResult.Extensions.AccountDataExtension(
+            global_account_data_map=global_account_data_map,
+            account_data_by_room_map=account_data_by_room_map,
+        )
+
+    @trace
+    async def get_receipts_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
+        receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]:
+        """Handle Receipts extension (MSC3960)
+
+        Args:
+            sync_config: Sync configuration
+            previous_connection_state: The per-connection state from the previous request
+            new_connection_state: A mutable copy of the per-connection
+                state, used to record updates to the state.
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            receipts_request: The receipts extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not receipts_request.enabled:
+            return None
+
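+        # Work out which rooms we should return receipts for, based on the
+        # requested lists/rooms and what is actually in this response.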
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=receipts_request.lists,
+            requested_room_ids=receipts_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_receipt_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            # We need to handle the different cases depending on whether we have
+            # sent down receipts previously, so we split the relevant rooms
+            # up into different collections based on status.
+            live_rooms = set()
+            previously_rooms: Dict[str, MultiWriterStreamToken] = {}
+            initial_rooms = set()
+
+            for room_id in relevant_room_ids:
+                if not from_token:
+                    initial_rooms.add(room_id)
+                    continue
+
+                # If we're sending down the room from scratch again for some
+                # reason, we should always resend the receipts as well
+                # (regardless of if we've sent them down before). This is to
+                # mimic the behaviour of what happens on initial sync, where you
+                # get a chunk of timeline with all of the corresponding receipts
+                # for the events in the timeline.
+                #
+                # We also resend down receipts when we "expand" the timeline,
+                # (see the "XXX: Odd behavior" in
+                # `synapse.handlers.sliding_sync`).
+                room_result = actual_room_response_map.get(room_id)
+                if room_result is not None:
+                    if room_result.initial or room_result.unstable_expanded_timeline:
+                        initial_rooms.add(room_id)
+                        continue
+
+                room_status = previous_connection_state.receipts.have_sent_room(room_id)
+                if room_status.status == HaveSentRoomFlag.LIVE:
+                    live_rooms.add(room_id)
+                elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
+                    assert room_status.last_token is not None
+                    previously_rooms[room_id] = room_status.last_token
+                elif room_status.status == HaveSentRoomFlag.NEVER:
+                    initial_rooms.add(room_id)
+                else:
+                    assert_never(room_status.status)
+
+            # The set of receipts that we fetched. Private receipts need to be
+            # filtered out before returning.
+            fetched_receipts = []
+
+            # For live rooms we just fetch all receipts in those rooms since the
+            # `since` token.
+            if live_rooms:
+                assert from_token is not None
+                receipts = await self.store.get_linearized_receipts_for_rooms(
+                    room_ids=live_rooms,
+                    from_key=from_token.stream_token.receipt_key,
+                    to_key=to_token.receipt_key,
+                )
+                fetched_receipts.extend(receipts)
+
+            # For rooms we've previously sent down, but aren't up to date, we
+            # need to use the from token from the room status.
+            if previously_rooms:
+                # Fetch any missing rooms concurrently.
+
+                async def handle_previously_room(room_id: str) -> None:
+                    receipt_token = previously_rooms[room_id]
+                    # TODO: Limit the number of receipts we're about to send down
+                    # for the room; if it's too many we should TODO
+                    previously_receipts = (
+                        await self.store.get_linearized_receipts_for_room(
+                            room_id=room_id,
+                            from_key=receipt_token,
+                            to_key=to_token.receipt_key,
+                        )
+                    )
+                    fetched_receipts.extend(previously_receipts)
+
+                await concurrently_execute(
+                    handle_previously_room, previously_rooms.keys(), 20
+                )
+
+            if initial_rooms:
+                # We also always send down receipts for the current user.
+                user_receipts = (
+                    await self.store.get_linearized_receipts_for_user_in_rooms(
+                        user_id=sync_config.user.to_string(),
+                        room_ids=initial_rooms,
+                        to_key=to_token.receipt_key,
+                    )
+                )
+
+                # For rooms we haven't previously sent down, we could send all receipts
+                # from that room but we only want to include receipts for events
+                # in the timeline to avoid bloating and blowing up the sync response
+                # as the number of users in the room increases (this behavior is part of the spec).
+                initial_rooms_and_event_ids = [
+                    (room_id, event.event_id)
+                    for room_id in initial_rooms
+                    if room_id in actual_room_response_map
+                    for event in actual_room_response_map[room_id].timeline_events
+                ]
+                initial_receipts = await self.store.get_linearized_receipts_for_events(
+                    room_and_event_ids=initial_rooms_and_event_ids,
+                )
+
+                # Combine the receipts for a room and add them to
+                # `fetched_receipts`
+                for room_id in initial_receipts.keys() | user_receipts.keys():
+                    receipt_content = ReceiptInRoom.merge_to_content(
+                        list(
+                            itertools.chain(
+                                initial_receipts.get(room_id, []),
+                                user_receipts.get(room_id, []),
+                            )
+                        )
+                    )
+
+                    fetched_receipts.append(
+                        {
+                            "room_id": room_id,
+                            "type": EduTypes.RECEIPT,
+                            "content": receipt_content,
+                        }
+                    )
+
+            fetched_receipts = ReceiptEventSource.filter_out_private_receipts(
+                fetched_receipts, sync_config.user.to_string()
+            )
+
+            for receipt in fetched_receipts:
+                # These fields should exist for every receipt
+                room_id = receipt["room_id"]
+                type = receipt["type"]
+                content = receipt["content"]
+
+                room_id_to_receipt_map[room_id] = {"type": type, "content": content}
+
+            # Update the per-connection state to track which rooms we have sent
+            # all the receipts for.
+            new_connection_state.receipts.record_sent_rooms(previously_rooms.keys())
+            new_connection_state.receipts.record_sent_rooms(initial_rooms)
+
+        if from_token:
+            # Now find the set of rooms that may have receipts that we're not sending
+            # down. We only need to check rooms that we have previously returned
+            # receipts for (in `previous_connection_state`) because we only care about
+            # updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just
+            # stay pointing at their previous position so we don't need to waste time
+            # checking those. And since we default to `NEVER`, rooms that have never
+            # been sent before don't need to be recorded, as we'll handle them correctly
+            # when they come into range for the first time.
+            rooms_no_receipts = [
+                room_id
+                for room_id, room_status in previous_connection_state.receipts._statuses.items()
+                if room_status.status == HaveSentRoomFlag.LIVE
+                and room_id not in relevant_room_ids
+            ]
+            changed_rooms = await self.store.get_rooms_with_receipts_between(
+                rooms_no_receipts,
+                from_key=from_token.stream_token.receipt_key,
+                to_key=to_token.receipt_key,
+            )
+            new_connection_state.receipts.record_unsent_rooms(
+                changed_rooms, from_token.stream_token.receipt_key
+            )
+
+        return SlidingSyncResult.Extensions.ReceiptsExtension(
+            room_id_to_receipt_map=room_id_to_receipt_map,
+        )
+
+    async def get_typing_extension_response(
+        self,
+        sync_config: SlidingSyncConfig,
+        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+        actual_room_ids: Set[str],
+        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
+        typing_request: SlidingSyncConfig.Extensions.TypingExtension,
+        to_token: StreamToken,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]:
+        """Handle Typing Notification extension (MSC3961)
+
+        Args:
+            sync_config: Sync configuration
+            actual_lists: Sliding window API. A map of list key to list results in the
+                Sliding Sync response.
+            actual_room_ids: The actual room IDs in the Sliding Sync response.
+            actual_room_response_map: A map of room ID to room results in the
+                Sliding Sync response.
+            typing_request: The typing extension from the request
+            to_token: The point in the stream to sync up to.
+            from_token: The point in the stream to sync from.
+        """
+        # Skip if the extension is not enabled
+        if not typing_request.enabled:
+            return None
+
+        relevant_room_ids = self.find_relevant_room_ids_for_extension(
+            requested_lists=typing_request.lists,
+            requested_room_ids=typing_request.rooms,
+            actual_lists=actual_lists,
+            actual_room_ids=actual_room_ids,
+        )
+
+        room_id_to_typing_map: Dict[str, JsonMapping] = {}
+        if len(relevant_room_ids) > 0:
+            # Note: We don't need to take connection tracking into account for typing
+            # notifications because the client will get anything that is still relevant
+            # and hasn't timed out when the room comes into range. We consider the gap
+            # where the room fell out of range long enough for any typing notifications
+            # to have timed out (it's not worth the 30 seconds of data we may have missed).
+            typing_source = self.event_sources.sources.typing
+            typing_notifications, _ = await typing_source.get_new_events(
+                user=sync_config.user,
+                from_key=(from_token.stream_token.typing_key if from_token else 0),
+                to_key=to_token.typing_key,
+                # This is a dummy value and isn't used in the function
+                limit=0,
+                room_ids=relevant_room_ids,
+                is_guest=False,
+            )
+
+            for typing_notification in typing_notifications:
+                # These fields should exist for every typing notification
+                room_id = typing_notification["room_id"]
+                type = typing_notification["type"]
+                content = typing_notification["content"]
+
+                room_id_to_typing_map[room_id] = {"type": type, "content": content}
+
+        return SlidingSyncResult.Extensions.TypingExtension(
+            room_id_to_typing_map=room_id_to_typing_map,
+        )
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
new file mode 100644
index 0000000000..a1730b7e05
--- /dev/null
+++ b/synapse/handlers/sliding_sync/room_lists.py
@@ -0,0 +1,2168 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+
+import enum
+import logging
+from itertools import chain
+from typing import (
+    TYPE_CHECKING,
+    AbstractSet,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+    Union,
+    cast,
+)
+
+import attr
+from immutabledict import immutabledict
+from typing_extensions import assert_never
+
+from synapse.api.constants import (
+    AccountDataTypes,
+    EventContentFields,
+    EventTypes,
+    Membership,
+)
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import StrippedStateEvent
+from synapse.events.utils import parse_stripped_state_event
+from synapse.logging.opentracing import start_active_span, trace
+from synapse.storage.databases.main.state import (
+    ROOM_UNKNOWN_SENTINEL,
+    Sentinel as StateSentinel,
+)
+from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
+from synapse.storage.roommember import (
+    RoomsForUser,
+    RoomsForUserSlidingSync,
+    RoomsForUserStateReset,
+)
+from synapse.types import (
+    MutableStateMap,
+    RoomStreamToken,
+    StateMap,
+    StrCollection,
+    StreamKeyType,
+    StreamToken,
+    UserID,
+)
+from synapse.types.handlers.sliding_sync import (
+    HaveSentRoomFlag,
+    OperationType,
+    PerConnectionState,
+    RoomSyncConfig,
+    SlidingSyncConfig,
+    SlidingSyncResult,
+)
+from synapse.types.state import StateFilter
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+
+class Sentinel(enum.Enum):
+    # defining a sentinel in this way allows mypy to correctly handle the
+    # type of a dictionary lookup and subsequent type narrowing.
+    UNSET_SENTINEL = object()
+
+
+# Helper definition for the types that we might return. We do this to avoid
+# copying data between types (which can be expensive for many rooms).
+RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync]
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class SlidingSyncInterestedRooms:
+    """The set of rooms and metadata a client is interested in based on their
+    sliding sync request.
+
+    Returned by `compute_interested_rooms`.
+
+    Attributes:
+        lists: A mapping from list name to the list result for the response
+        relevant_room_map: A map from rooms that match the sync request to
+            their room sync config.
+        relevant_rooms_to_send_map: Subset of `relevant_room_map` that
+            includes the rooms that *may* have relevant updates. Rooms not
+            in this map will definitely not have room updates (though
+            extensions may have updates in these rooms).
+        newly_joined_rooms: The set of rooms that were joined in the token range
+            and the user is still joined to at the end of this range.
+        newly_left_rooms: The set of rooms that the user left in the token range
+            and is still "leave" at the end of this range.
+        dm_room_ids: The set of rooms the user considers direct-message (DM) rooms
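+        all_rooms: The set of room IDs of all rooms that could appear in any of the
+            lists, including rooms outside the requested list ranges.
+        room_membership_for_user_map: Map from room ID to the user's membership
+            information for that room.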
+    """
+
+    lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
+    relevant_room_map: Mapping[str, RoomSyncConfig]
+    relevant_rooms_to_send_map: Mapping[str, RoomSyncConfig]
+    all_rooms: Set[str]
+    room_membership_for_user_map: Mapping[str, RoomsForUserType]
+
+    newly_joined_rooms: AbstractSet[str]
+    newly_left_rooms: AbstractSet[str]
+    dm_room_ids: AbstractSet[str]
+
+    @staticmethod
+    def empty() -> "SlidingSyncInterestedRooms":
+        return SlidingSyncInterestedRooms(
+            lists={},
+            relevant_room_map={},
+            relevant_rooms_to_send_map={},
+            all_rooms=set(),
+            room_membership_for_user_map={},
+            newly_joined_rooms=set(),
+            newly_left_rooms=set(),
+            dm_room_ids=set(),
+        )
+
+
+def filter_membership_for_sync(
+    *,
+    user_id: str,
+    room_membership_for_user: RoomsForUserType,
+    newly_left: bool,
+) -> bool:
+    """
+    Returns True if the membership event should be included in the sync response,
+    otherwise False.
+
+    Args:
+        user_id: The user ID that the membership applies to
+        room_membership_for_user: Membership information for the user in the room
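+        newly_left: Whether the user newly left the room in the token range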
+    """
+
+    membership = room_membership_for_user.membership
+    sender = room_membership_for_user.sender
+
+    # We want to allow everything except rooms the user has left, unless `newly_left`,
+    # because we want everything that's *still* relevant to the user. We include
+    # `newly_left` rooms because the last event that the user should see is their own
+    # leave event.
+    #
+    # A leave != kick. This logic includes kicks (leave events where the sender is not
+    # the same user).
+    #
+    # When `sender=None`, it means that a state reset happened that removed the user
+    # from the room without a corresponding leave event. We can just remove the rooms
+    # since they are no longer relevant to the user but will still appear if they are
+    # `newly_left`.
+    return (
+        # Anything except leave events
+        membership != Membership.LEAVE
+        # Unless...
+        or newly_left
+        # Allow kicks
+        or (membership == Membership.LEAVE and sender not in (user_id, None))
+    )
+
+
+class SlidingSyncRoomLists:
+    """Handles calculating the room lists from sliding sync requests"""
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
+        self.is_mine_id = hs.is_mine_id
+
+    async def compute_interested_rooms(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> SlidingSyncInterestedRooms:
+        """Fetch the set of rooms that match the request"""
+        has_lists = sync_config.lists is not None and len(sync_config.lists) > 0
+        has_room_subscriptions = (
+            sync_config.room_subscriptions is not None
+            and len(sync_config.room_subscriptions) > 0
+        )
+
+        if not has_lists and not has_room_subscriptions:
+            return SlidingSyncInterestedRooms.empty()
+
+        if await self.store.have_finished_sliding_sync_background_jobs():
+            return await self._compute_interested_rooms_new_tables(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                to_token=to_token,
+                from_token=from_token,
+            )
+        else:
+            # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+            # foreground update for
+            # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+            # https://github.com/element-hq/synapse/issues/17623)
+            return await self._compute_interested_rooms_fallback(
+                sync_config=sync_config,
+                previous_connection_state=previous_connection_state,
+                to_token=to_token,
+                from_token=from_token,
+            )
+
+    @trace
+    async def _compute_interested_rooms_new_tables(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> SlidingSyncInterestedRooms:
+        """Implementation of `compute_interested_rooms` using new sliding sync db tables."""
+        user_id = sync_config.user.to_string()
+
+        # Assemble sliding window lists
+        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
+        # Keep track of the rooms that we can display and need to fetch more info about
+        relevant_room_map: Dict[str, RoomSyncConfig] = {}
+        # The set of room IDs of all rooms that could appear in any list. These
+        # include rooms that are outside the list ranges.
+        all_rooms: Set[str] = set()
+
+        # Note: this won't include rooms the user has left themselves. We add back
+        # `newly_left` rooms below. This is more efficient than fetching all rooms and
+        # then filtering out the old left rooms.
+        room_membership_for_user_map = await self.store.get_sliding_sync_rooms_for_user(
+            user_id
+        )
+
+        # Remove invites from ignored users
+        ignored_users = await self.store.ignored_users(user_id)
+        if ignored_users:
+            # TODO: It would be nice to avoid these copies
+            room_membership_for_user_map = dict(room_membership_for_user_map)
+            # Make a copy so we don't run into an error: `dictionary changed size during
+            # iteration`, when we remove items
+            for room_id in list(room_membership_for_user_map.keys()):
+                room_for_user_sliding_sync = room_membership_for_user_map[room_id]
+                if (
+                    room_for_user_sliding_sync.membership == Membership.INVITE
+                    and room_for_user_sliding_sync.sender in ignored_users
+                ):
+                    room_membership_for_user_map.pop(room_id, None)
+
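+        # Rewind the current memberships back to the point in time of the `to_token`,
+        # in case any memberships changed after it.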
+        changes = await self._get_rewind_changes_to_current_membership_to_token(
+            sync_config.user, room_membership_for_user_map, to_token=to_token
+        )
+        if changes:
+            # TODO: It would be nice to avoid these copies
+            room_membership_for_user_map = dict(room_membership_for_user_map)
+            for room_id, change in changes.items():
+                if change is None:
+                    # Remove rooms that the user joined after the `to_token`
+                    room_membership_for_user_map.pop(room_id, None)
+                    continue
+
+                existing_room = room_membership_for_user_map.get(room_id)
+                if existing_room is not None:
+                    # Update room membership events to the point in time of the `to_token`
+                    room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
+                        room_id=room_id,
+                        sender=change.sender,
+                        membership=change.membership,
+                        event_id=change.event_id,
+                        event_pos=change.event_pos,
+                        room_version_id=change.room_version_id,
+                        # We keep the state of the room though
+                        has_known_state=existing_room.has_known_state,
+                        room_type=existing_room.room_type,
+                        is_encrypted=existing_room.is_encrypted,
+                    )
+
+        (
+            newly_joined_room_ids,
+            newly_left_room_map,
+        ) = await self._get_newly_joined_and_left_rooms(
+            user_id, from_token=from_token, to_token=to_token
+        )
+        dm_room_ids = await self._get_dm_rooms_for_user(user_id)
+
+        # Add back `newly_left` rooms (rooms left in the from -> to token range).
+        #
+        # We do this because `get_sliding_sync_rooms_for_user(...)` doesn't include
+        # rooms that the user left themselves, as it's more efficient to add them back
+        # here than to fetch all rooms and then filter out the old left rooms. The user
+        # only leaves a room once in a blue moon so this barely needs to run.
+        #
+        missing_newly_left_rooms = (
+            newly_left_room_map.keys() - room_membership_for_user_map.keys()
+        )
+        if missing_newly_left_rooms:
+            # TODO: It would be nice to avoid these copies
+            room_membership_for_user_map = dict(room_membership_for_user_map)
+            for room_id in missing_newly_left_rooms:
+                newly_left_room_for_user = newly_left_room_map[room_id]
+                # This should be a given
+                assert newly_left_room_for_user.membership == Membership.LEAVE
+
+                # Add back `newly_left` rooms
+                #
+                # Check for membership and state in the Sliding Sync tables as it's just
+                # another membership
+                newly_left_room_for_user_sliding_sync = (
+                    await self.store.get_sliding_sync_room_for_user(user_id, room_id)
+                )
+                # If the membership exists, the user left the room normally on their
+                # own (as opposed to being state reset out of it)
+                if newly_left_room_for_user_sliding_sync is not None:
+                    room_membership_for_user_map[room_id] = (
+                        newly_left_room_for_user_sliding_sync
+                    )
+
+                    change = changes.get(room_id)
+                    if change is not None:
+                        # Update room membership events to the point in time of the `to_token`
+                        room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
+                            room_id=room_id,
+                            sender=change.sender,
+                            membership=change.membership,
+                            event_id=change.event_id,
+                            event_pos=change.event_pos,
+                            room_version_id=change.room_version_id,
+                            # We keep the state of the room though
+                            has_known_state=newly_left_room_for_user_sliding_sync.has_known_state,
+                            room_type=newly_left_room_for_user_sliding_sync.room_type,
+                            is_encrypted=newly_left_room_for_user_sliding_sync.is_encrypted,
+                        )
+
+                # If we are `newly_left` from the room but can't find any membership,
+                # then we have been "state reset" out of the room
+                else:
+                    # Get the state at the time. We can't read from the Sliding Sync
+                    # tables because the user has no membership in the room according to
+                    # the state (thanks to the state reset).
+                    #
+                    # Note: `room_type` never changes, so we can just get current room
+                    # type
+                    room_type = await self.store.get_room_type(room_id)
+                    has_known_state = room_type is not ROOM_UNKNOWN_SENTINEL
+                    if isinstance(room_type, StateSentinel):
+                        room_type = None
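+                    # (If the room is unknown to this server, `get_room_type` hands back
+                    # the sentinel: we record that we have no known state and surface
+                    # `room_type` as `None`.)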
+
+                    # Get the encryption status at the time of the token
+                    is_encrypted = await self.get_is_encrypted_for_room_at_token(
+                        room_id,
+                        newly_left_room_for_user.event_pos.to_room_stream_token(),
+                    )
+
+                    room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
+                        room_id=room_id,
+                        sender=newly_left_room_for_user.sender,
+                        membership=newly_left_room_for_user.membership,
+                        event_id=newly_left_room_for_user.event_id,
+                        event_pos=newly_left_room_for_user.event_pos,
+                        room_version_id=newly_left_room_for_user.room_version_id,
+                        has_known_state=has_known_state,
+                        room_type=room_type,
+                        is_encrypted=is_encrypted,
+                    )
+
+        if sync_config.lists:
+            sync_room_map = room_membership_for_user_map
+            with start_active_span("assemble_sliding_window_lists"):
+                for list_key, list_config in sync_config.lists.items():
+                    # Apply filters
+                    filtered_sync_room_map = sync_room_map
+                    if list_config.filters is not None:
+                        filtered_sync_room_map = await self.filter_rooms_using_tables(
+                            user_id,
+                            sync_room_map,
+                            previous_connection_state,
+                            list_config.filters,
+                            to_token,
+                            dm_room_ids,
+                        )
+
+                    # Find which rooms are partially stated and may need to be filtered out
+                    # depending on the `required_state` requested (see below).
+                    partial_state_rooms = await self.store.get_partial_rooms()
+
+                    # Since creating the `RoomSyncConfig` takes some work, let's just do it
+                    # once.
+                    room_sync_config = RoomSyncConfig.from_room_config(list_config)
+
+                    # Exclude partially-stated rooms if we must wait for the room to be
+                    # fully-stated
+                    if room_sync_config.must_await_full_state(self.is_mine_id):
+                        filtered_sync_room_map = {
+                            room_id: room
+                            for room_id, room in filtered_sync_room_map.items()
+                            if room_id not in partial_state_rooms
+                        }
+
+                    all_rooms.update(filtered_sync_room_map)
+
+                    ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
+
+                    if list_config.ranges:
+                        # Optimization: If we are asking for the full range, we don't
+                        # need to sort the list.
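+                        # For example (illustrative), a single requested range of
+                        # [0, 19] over 15 filtered rooms already covers the whole list
+                        # (19 >= 15 - 1), so we can use the unsorted values as-is.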
+                        if (
+                            # We're looking for a single range that covers the entire list
+                            len(list_config.ranges) == 1
+                            # Range starts at 0
+                            and list_config.ranges[0][0] == 0
+                            # And the range extends to the end of the list or more. Each
+                            # side is inclusive.
+                            and list_config.ranges[0][1]
+                            >= len(filtered_sync_room_map) - 1
+                        ):
+                            sorted_room_info: List[RoomsForUserType] = list(
+                                filtered_sync_room_map.values()
+                            )
+                        else:
+                            # Sort the list
+                            sorted_room_info = await self.sort_rooms(
+                                # Cast is safe because RoomsForUserSlidingSync is part
+                                # of the `RoomsForUserType` union. Why can't it detect this?
+                                cast(
+                                    Dict[str, RoomsForUserType], filtered_sync_room_map
+                                ),
+                                to_token,
+                                # We only need to sort the rooms up to the end
+                                # of the largest range. Both sides of range are
+                                # inclusive so we `+ 1`.
+                                limit=max(range[1] + 1 for range in list_config.ranges),
+                            )
+
+                        for range in list_config.ranges:
+                            room_ids_in_list: List[str] = []
+
+                            # We're going to loop through the sorted list of rooms starting
+                            # at the range start index and keep adding rooms until we fill
+                            # up the range or run out of rooms.
+                            #
+                            # Both sides of range are inclusive so we `+ 1`
+                            max_num_rooms = range[1] - range[0] + 1
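+                            # e.g. (illustrative) a requested range of [20, 29] gives
+                            # max_num_rooms = 29 - 20 + 1 = 10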
+                            for room_membership in sorted_room_info[range[0] :]:
+                                room_id = room_membership.room_id
+
+                                if len(room_ids_in_list) >= max_num_rooms:
+                                    break
+
+                                # Take the superset of the `RoomSyncConfig` for each room.
+                                #
+                                # Update our `relevant_room_map` with the room we're going
+                                # to display and need to fetch more info about.
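+                                # ("Superset" here roughly means taking the larger
+                                # `timeline_limit` and the union of the `required_state`
+                                # entries, so the combined config satisfies both.)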
+                                existing_room_sync_config = relevant_room_map.get(
+                                    room_id
+                                )
+                                if existing_room_sync_config is not None:
+                                    room_sync_config = existing_room_sync_config.combine_room_sync_config(
+                                        room_sync_config
+                                    )
+
+                                relevant_room_map[room_id] = room_sync_config
+
+                                room_ids_in_list.append(room_id)
+
+                            ops.append(
+                                SlidingSyncResult.SlidingWindowList.Operation(
+                                    op=OperationType.SYNC,
+                                    range=range,
+                                    room_ids=room_ids_in_list,
+                                )
+                            )
+
+                    lists[list_key] = SlidingSyncResult.SlidingWindowList(
+                        count=len(filtered_sync_room_map),
+                        ops=ops,
+                    )
+
+        if sync_config.room_subscriptions:
+            with start_active_span("assemble_room_subscriptions"):
+                # TODO: It would be nice to avoid these copies
+                room_membership_for_user_map = dict(room_membership_for_user_map)
+
+                # Find which rooms are partially stated and may need to be filtered out
+                # depending on the `required_state` requested (see below).
+                partial_state_rooms = await self.store.get_partial_rooms()
+
+                # Fetch any rooms that we have not already fetched from the database.
+                subscription_sliding_sync_rooms = (
+                    await self.store.get_sliding_sync_room_for_user_batch(
+                        user_id,
+                        sync_config.room_subscriptions.keys()
+                        - room_membership_for_user_map.keys(),
+                    )
+                )
+                room_membership_for_user_map.update(subscription_sliding_sync_rooms)
+
+                for (
+                    room_id,
+                    room_subscription,
+                ) in sync_config.room_subscriptions.items():
+                    # Check if we have a membership for the room, but didn't pull it out
+                    # above. This could be e.g. a leave that we don't pull out by
+                    # default.
+                    current_room_entry = room_membership_for_user_map.get(room_id)
+                    if not current_room_entry:
+                        # TODO: Handle rooms the user isn't in.
+                        continue
+
+                    all_rooms.add(room_id)
+
+                    # Take the superset of the `RoomSyncConfig` for each room.
+                    room_sync_config = RoomSyncConfig.from_room_config(
+                        room_subscription
+                    )
+
+                    # Exclude partially-stated rooms if we must wait for the room to be
+                    # fully-stated
+                    if room_sync_config.must_await_full_state(self.is_mine_id):
+                        if room_id in partial_state_rooms:
+                            continue
+
+                    # Update our `relevant_room_map` with the room we're going to display
+                    # and need to fetch more info about.
+                    existing_room_sync_config = relevant_room_map.get(room_id)
+                    if existing_room_sync_config is not None:
+                        room_sync_config = (
+                            existing_room_sync_config.combine_room_sync_config(
+                                room_sync_config
+                            )
+                        )
+
+                    relevant_room_map[room_id] = room_sync_config
+
+        # Filtered subset of `relevant_room_map` for rooms that may have updates
+        # (in the event stream)
+        relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send(
+            previous_connection_state, from_token, relevant_room_map
+        )
+
+        return SlidingSyncInterestedRooms(
+            lists=lists,
+            relevant_room_map=relevant_room_map,
+            relevant_rooms_to_send_map=relevant_rooms_to_send_map,
+            all_rooms=all_rooms,
+            room_membership_for_user_map=room_membership_for_user_map,
+            newly_joined_rooms=newly_joined_room_ids,
+            newly_left_rooms=set(newly_left_room_map),
+            dm_room_ids=dm_room_ids,
+        )
+
+    async def _compute_interested_rooms_fallback(
+        self,
+        sync_config: SlidingSyncConfig,
+        previous_connection_state: "PerConnectionState",
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> SlidingSyncInterestedRooms:
+        """Fallback code when the database background updates haven't completed yet."""
+
+        (
+            room_membership_for_user_map,
+            newly_joined_room_ids,
+            newly_left_room_ids,
+        ) = await self.get_room_membership_for_user_at_to_token(
+            sync_config.user, to_token, from_token
+        )
+
+        dm_room_ids = await self._get_dm_rooms_for_user(sync_config.user.to_string())
+
+        # Assemble sliding window lists
+        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
+        # Keep track of the rooms that we can display and need to fetch more info about
+        relevant_room_map: Dict[str, RoomSyncConfig] = {}
+        # The set of room IDs of all rooms that could appear in any list. These
+        # include rooms that are outside the list ranges.
+        all_rooms: Set[str] = set()
+
+        if sync_config.lists:
+            with start_active_span("assemble_sliding_window_lists"):
+                sync_room_map = await self.filter_rooms_relevant_for_sync(
+                    user=sync_config.user,
+                    room_membership_for_user_map=room_membership_for_user_map,
+                    newly_left_room_ids=newly_left_room_ids,
+                )
+
+                for list_key, list_config in sync_config.lists.items():
+                    # Apply filters
+                    filtered_sync_room_map = sync_room_map
+                    if list_config.filters is not None:
+                        filtered_sync_room_map = await self.filter_rooms(
+                            sync_config.user,
+                            sync_room_map,
+                            previous_connection_state,
+                            list_config.filters,
+                            to_token,
+                            dm_room_ids,
+                        )
+
+                    # Find which rooms are partially stated and may need to be filtered out
+                    # depending on the `required_state` requested (see below).
+                    partial_state_rooms = await self.store.get_partial_rooms()
+
+                    # Since creating the `RoomSyncConfig` takes some work, let's just do it
+                    # once.
+                    room_sync_config = RoomSyncConfig.from_room_config(list_config)
+
+                    # Exclude partially-stated rooms if we must wait for the room to be
+                    # fully-stated
+                    if room_sync_config.must_await_full_state(self.is_mine_id):
+                        filtered_sync_room_map = {
+                            room_id: room
+                            for room_id, room in filtered_sync_room_map.items()
+                            if room_id not in partial_state_rooms
+                        }
+
+                    all_rooms.update(filtered_sync_room_map)
+
+                    # Sort the list
+                    sorted_room_info = await self.sort_rooms(
+                        filtered_sync_room_map, to_token
+                    )
+
+                    ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
+                    if list_config.ranges:
+                        for range in list_config.ranges:
+                            room_ids_in_list: List[str] = []
+
+                            # We're going to loop through the sorted list of rooms starting
+                            # at the range start index and keep adding rooms until we fill
+                            # up the range or run out of rooms.
+                            #
+                            # Both sides of range are inclusive so we `+ 1`
+                            max_num_rooms = range[1] - range[0] + 1
+                            for room_membership in sorted_room_info[range[0] :]:
+                                room_id = room_membership.room_id
+
+                                if len(room_ids_in_list) >= max_num_rooms:
+                                    break
+
+                                # Take the superset of the `RoomSyncConfig` for each room.
+                                #
+                                # Update our `relevant_room_map` with the room we're going
+                                # to display and need to fetch more info about.
+                                existing_room_sync_config = relevant_room_map.get(
+                                    room_id
+                                )
+                                if existing_room_sync_config is not None:
+                                    room_sync_config = existing_room_sync_config.combine_room_sync_config(
+                                        room_sync_config
+                                    )
+
+                                relevant_room_map[room_id] = room_sync_config
+
+                                room_ids_in_list.append(room_id)
+
+                            ops.append(
+                                SlidingSyncResult.SlidingWindowList.Operation(
+                                    op=OperationType.SYNC,
+                                    range=range,
+                                    room_ids=room_ids_in_list,
+                                )
+                            )
+
+                    lists[list_key] = SlidingSyncResult.SlidingWindowList(
+                        count=len(sorted_room_info),
+                        ops=ops,
+                    )
+
+        if sync_config.room_subscriptions:
+            with start_active_span("assemble_room_subscriptions"):
+                # Find which rooms are partially stated and may need to be filtered out
+                # depending on the `required_state` requested (see below).
+                partial_state_rooms = await self.store.get_partial_rooms()
+
+                for (
+                    room_id,
+                    room_subscription,
+                ) in sync_config.room_subscriptions.items():
+                    room_membership_for_user_at_to_token = (
+                        await self.check_room_subscription_allowed_for_user(
+                            room_id=room_id,
+                            room_membership_for_user_map=room_membership_for_user_map,
+                            to_token=to_token,
+                        )
+                    )
+
+                    # Skip this room if the user isn't allowed to see it
+                    if not room_membership_for_user_at_to_token:
+                        continue
+
+                    all_rooms.add(room_id)
+
+                    room_membership_for_user_map[room_id] = (
+                        room_membership_for_user_at_to_token
+                    )
+
+                    # Take the superset of the `RoomSyncConfig` for each room.
+                    room_sync_config = RoomSyncConfig.from_room_config(
+                        room_subscription
+                    )
+
+                    # Exclude partially-stated rooms if we must wait for the room to be
+                    # fully-stated
+                    if room_sync_config.must_await_full_state(self.is_mine_id):
+                        if room_id in partial_state_rooms:
+                            continue
+
+                    # Update our `relevant_room_map` with the room we're going to display
+                    # and need to fetch more info about.
+                    existing_room_sync_config = relevant_room_map.get(room_id)
+                    if existing_room_sync_config is not None:
+                        room_sync_config = (
+                            existing_room_sync_config.combine_room_sync_config(
+                                room_sync_config
+                            )
+                        )
+
+                    relevant_room_map[room_id] = room_sync_config
+
+        # Filtered subset of `relevant_room_map` for rooms that may have updates
+        # (in the event stream)
+        relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send(
+            previous_connection_state, from_token, relevant_room_map
+        )
+
+        return SlidingSyncInterestedRooms(
+            lists=lists,
+            relevant_room_map=relevant_room_map,
+            relevant_rooms_to_send_map=relevant_rooms_to_send_map,
+            all_rooms=all_rooms,
+            room_membership_for_user_map=room_membership_for_user_map,
+            newly_joined_rooms=newly_joined_room_ids,
+            newly_left_rooms=newly_left_room_ids,
+            dm_room_ids=dm_room_ids,
+        )
+
+    async def _filter_relevant_rooms_to_send(
+        self,
+        previous_connection_state: PerConnectionState,
+        from_token: Optional[StreamToken],
+        relevant_room_map: Dict[str, RoomSyncConfig],
+    ) -> Dict[str, RoomSyncConfig]:
+        """Filters the `relevant_room_map` down to those rooms that may have
+        updates we need to fetch and return."""
+
+        # Filtered subset of `relevant_room_map` for rooms that may have updates
+        # (in the event stream)
+        relevant_rooms_to_send_map: Dict[str, RoomSyncConfig] = relevant_room_map
+        if relevant_room_map:
+            with start_active_span("filter_relevant_rooms_to_send"):
+                if from_token:
+                    rooms_should_send = set()
+
+                    # First we check if there are rooms that match a list/room
+                    # subscription and have updates we need to send (i.e. either because
+                    # we haven't sent the room down, or we have but there are missing
+                    # updates).
+                    for room_id, room_config in relevant_room_map.items():
+                        prev_room_sync_config = (
+                            previous_connection_state.room_configs.get(room_id)
+                        )
+                        if prev_room_sync_config is not None:
+                            # Always include rooms whose timeline limit has increased.
+                            # (see the "XXX: Odd behavior" described below)
+                            if (
+                                prev_room_sync_config.timeline_limit
+                                < room_config.timeline_limit
+                            ):
+                                rooms_should_send.add(room_id)
+                                continue
+
+                        status = previous_connection_state.rooms.have_sent_room(room_id)
+                        if (
+                            # The room was never sent down before so the client needs to know
+                            # about it regardless of any updates.
+                            status.status == HaveSentRoomFlag.NEVER
+                            # `PREVIOUSLY` literally means the "room was sent down before *AND*
+                            # there are updates we haven't sent down" so we already know this
+                            # room has updates.
+                            or status.status == HaveSentRoomFlag.PREVIOUSLY
+                        ):
+                            rooms_should_send.add(room_id)
+                        elif status.status == HaveSentRoomFlag.LIVE:
+                            # We know that we've sent all updates up until `from_token`,
+                            # so we just need to check if there have been updates since
+                            # then.
+                            pass
+                        else:
+                            assert_never(status.status)
+
+                    # We only need to check for new events since any state changes
+                    # will also come down as new events.
+                    rooms_that_have_updates = (
+                        self.store.get_rooms_that_might_have_updates(
+                            relevant_room_map.keys(), from_token.room_key
+                        )
+                    )
+                    rooms_should_send.update(rooms_that_have_updates)
+                    relevant_rooms_to_send_map = {
+                        room_id: room_sync_config
+                        for room_id, room_sync_config in relevant_room_map.items()
+                        if room_id in rooms_should_send
+                    }
+
+        return relevant_rooms_to_send_map
+
+    @trace
+    async def _get_rewind_changes_to_current_membership_to_token(
+        self,
+        user: UserID,
+        rooms_for_user: Mapping[str, RoomsForUserType],
+        to_token: StreamToken,
+    ) -> Mapping[str, Optional[RoomsForUser]]:
+        """
+        Takes the current set of rooms for a user (retrieved after the given
+        token), and returns the changes needed to "rewind" it to match the set of
+        memberships *at that token* (<= `to_token`).
+
+        Args:
+            user: User to fetch rooms for
+            rooms_for_user: The set of rooms for the user after the `to_token`.
+            to_token: The token to rewind to
+
+        Returns:
+            The changes to apply to rewind the current memberships.
+        """
+        # If the user has never joined any rooms before, we can just return an empty result
+        if not rooms_for_user:
+            return {}
+
+        user_id = user.to_string()
+
+        # Get the `RoomStreamToken` that represents the spot we queried up to when we got
+        # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
+        #
+        # First, we need to get the max stream_ordering of each event persister instance
+        # that we queried events from.
+        instance_to_max_stream_ordering_map: Dict[str, int] = {}
+        for room_for_user in rooms_for_user.values():
+            instance_name = room_for_user.event_pos.instance_name
+            stream_ordering = room_for_user.event_pos.stream
+
+            current_instance_max_stream_ordering = (
+                instance_to_max_stream_ordering_map.get(instance_name)
+            )
+            if (
+                current_instance_max_stream_ordering is None
+                or stream_ordering > current_instance_max_stream_ordering
+            ):
+                instance_to_max_stream_ordering_map[instance_name] = stream_ordering
+
+        # Then assemble the `RoomStreamToken`
+        min_stream_pos = min(instance_to_max_stream_ordering_map.values())
+        membership_snapshot_token = RoomStreamToken(
+            # Minimum position in the `instance_map`
+            stream=min_stream_pos,
+            instance_map=immutabledict(
+                {
+                    instance_name: stream_pos
+                    for instance_name, stream_pos in instance_to_max_stream_ordering_map.items()
+                    if stream_pos > min_stream_pos
+                }
+            ),
+        )
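+        # For example (illustrative values): if the snapshot rows were persisted at
+        # positions {"master": 105, "worker-1": 110}, then `min_stream_pos` is 105 and
+        # the `instance_map` only records the instances ahead of it: {"worker-1": 110}.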
+
+        # Since we fetched the user's room list at some point in time after the
+        # tokens, we need to revert/rewind some membership changes to match the point in
+        # time of the `to_token`. In particular, we need to make these fixups:
+        #
+        # - a) Remove rooms that the user joined after the `to_token`
+        # - b) Update room membership events to the point in time of the `to_token`
+
+        # Fetch membership changes that fall in the range from `to_token` up to
+        # `membership_snapshot_token`
+        #
+        # If our `to_token` is already the same or ahead of the latest room membership
+        # for the user, we don't need to do any "2)" fix-ups and can just straight-up
+        # use the room list from the snapshot as a base (nothing has changed)
+        current_state_delta_membership_changes_after_to_token = []
+        if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
+            current_state_delta_membership_changes_after_to_token = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
+                    user_id,
+                    from_key=to_token.room_key,
+                    to_key=membership_snapshot_token,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
+                )
+            )
+
+        if not current_state_delta_membership_changes_after_to_token:
+            # There have been no membership changes, so we can return early.
+            return {}
+
+        # Otherwise, assemble the set of changes needed to rewind `rooms_for_user`
+        # back to the `to_token`.
+        changes: Dict[str, Optional[RoomsForUser]] = {}
+
+        # Assemble a list of the first membership event after the `to_token` so we can
+        # step backward to the previous membership that would apply to the from/to
+        # range.
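+        # (The `prev_*` fields of the first change *after* `to_token` describe the
+        # membership that was in effect *at* `to_token`, which is what we want to
+        # rewind to.)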
+        first_membership_change_by_room_id_after_to_token: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        for membership_change in current_state_delta_membership_changes_after_to_token:
+            # Only set if we haven't already set it
+            first_membership_change_by_room_id_after_to_token.setdefault(
+                membership_change.room_id, membership_change
+            )
+
+        # Since we fetched a snapshot of the user's room list at some point in time after
+        # the from/to tokens, we need to revert/rewind some membership changes to match
+        # the point in time of the `to_token`.
+        for (
+            room_id,
+            first_membership_change_after_to_token,
+        ) in first_membership_change_by_room_id_after_to_token.items():
+            # a) Remove rooms that the user joined after the `to_token`
+            if first_membership_change_after_to_token.prev_event_id is None:
+                changes[room_id] = None
+            # b) From the first membership event after the `to_token`, step backward to
+            # the previous membership that would apply to the from/to range.
+            else:
+                # We don't expect these fields to be `None` if we have a `prev_event_id`
+                # but we're being defensive since it's possible that the prev event was
+                # culled from the database.
+                if (
+                    first_membership_change_after_to_token.prev_event_pos is not None
+                    and first_membership_change_after_to_token.prev_membership
+                    is not None
+                    and first_membership_change_after_to_token.prev_sender is not None
+                ):
+                    # We need to know the room version ID, which we can normally get
+                    # from the current membership, but if we don't have that then we
+                    # need to query the DB.
+                    current_membership = rooms_for_user.get(room_id)
+                    if current_membership is not None:
+                        room_version_id = current_membership.room_version_id
+                    else:
+                        room_version_id = await self.store.get_room_version_id(room_id)
+
+                    changes[room_id] = RoomsForUser(
+                        room_id=room_id,
+                        event_id=first_membership_change_after_to_token.prev_event_id,
+                        event_pos=first_membership_change_after_to_token.prev_event_pos,
+                        membership=first_membership_change_after_to_token.prev_membership,
+                        sender=first_membership_change_after_to_token.prev_sender,
+                        room_version_id=room_version_id,
+                    )
+                else:
+                    # If we can't find the previous membership event, we shouldn't
+                    # include the room in the sync response since we can't determine the
+                    # exact membership state and shouldn't rely on the current snapshot.
+                    changes[room_id] = None
+
+        return changes
+
+    @trace
+    async def get_room_membership_for_user_at_to_token(
+        self,
+        user: UserID,
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]:
+        """
+        Fetch room IDs that the user has had membership in (the full room list including
+        long-lost left rooms that will be filtered, sorted, and sliced).
+
+        We're looking for rooms where the user has had any sort of membership in the
+        token range (> `from_token` and <= `to_token`)
+
+        In order for bans/kicks to not show up, you need to `/forget` those rooms. This
+        doesn't modify the event itself though and only adds the `forgotten` flag to the
+        `room_memberships` table in Synapse. There currently isn't a way to tell when a
+        room was forgotten, so we can't factor it into the token range.
+
+        Args:
+            user: User to fetch rooms for
+            to_token: The token to fetch rooms up to.
+            from_token: The point in the stream to sync from.
+
+        Returns:
+            A 3-tuple of:
+              - A dictionary of room IDs that the user has had membership in along with
+                membership information in that room at the time of `to_token`.
+              - Set of newly joined rooms
+              - Set of newly left rooms
+        """
+        user_id = user.to_string()
+
+        # First grab a current snapshot of the rooms for the user
+        # (also handles forgotten rooms)
+        room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
+            user_id=user_id,
+            # We want to fetch any kind of membership (joined and left rooms) in order
+            # to get the `event_pos` of the latest room membership event for the
+            # user.
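+            # (`Membership.LIST` covers every membership type: invite, join, knock,
+            # leave and ban.)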
+            membership_list=Membership.LIST,
+            excluded_rooms=self.rooms_to_exclude_globally,
+        )
+
+        # We filter out unknown room versions before we try and load any
+        # metadata about the room. They shouldn't go down sync anyway, and their
+        # metadata may be in a broken state.
+        room_for_user_list = [
+            room_for_user
+            for room_for_user in room_for_user_list
+            if room_for_user.room_version_id in KNOWN_ROOM_VERSIONS
+        ]
+
+        # Remove invites from ignored users
+        ignored_users = await self.store.ignored_users(user_id)
+        if ignored_users:
+            room_for_user_list = [
+                room_for_user
+                for room_for_user in room_for_user_list
+                if not (
+                    room_for_user.membership == Membership.INVITE
+                    and room_for_user.sender in ignored_users
+                )
+            ]
+
+        (
+            newly_joined_room_ids,
+            newly_left_room_map,
+        ) = await self._get_newly_joined_and_left_rooms(
+            user_id, to_token=to_token, from_token=from_token
+        )
+
+        # If the user has never joined any rooms before, we can just return an empty
+        # result. We also have to check the `newly_left_room_map` in case someone was
+        # state reset out of all of the rooms they were in.
+        if not room_for_user_list and not newly_left_room_map:
+            return {}, set(), set()
+
+        # Since we fetched the user's room list at some point in time after the
+        # tokens, we need to revert/rewind some membership changes to match the point in
+        # time of the `to_token`.
+        rooms_for_user: Dict[str, RoomsForUserType] = {
+            room.room_id: room for room in room_for_user_list
+        }
+        changes = await self._get_rewind_changes_to_current_membership_to_token(
+            user, rooms_for_user, to_token
+        )
+        for room_id, change_room_for_user in changes.items():
+            if change_room_for_user is None:
+                rooms_for_user.pop(room_id, None)
+            else:
+                rooms_for_user[room_id] = change_room_for_user
+
+        # Ensure we have entries for rooms that the user has been "state reset"
+        # out of. These are rooms that appear in the `newly_left_room_map` but
+        # aren't in the `rooms_for_user` map.
+        for room_id, newly_left_room_for_user in newly_left_room_map.items():
+            # If we already know about the room, it's not a state reset
+            if room_id in rooms_for_user:
+                continue
+
+            # This should be true if it's a state reset
+            assert newly_left_room_for_user.membership is Membership.LEAVE
+            assert newly_left_room_for_user.event_id is None
+            assert newly_left_room_for_user.sender is None
+
+            rooms_for_user[room_id] = newly_left_room_for_user
+
+        return rooms_for_user, newly_joined_room_ids, set(newly_left_room_map)
+
+    @trace
+    async def _get_newly_joined_and_left_rooms(
+        self,
+        user_id: str,
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]:
+        """Fetch the sets of rooms that the user newly joined or left in the
+        given token range.
+
+        Note: there may be rooms in the newly left rooms where the user was
+        "state reset" out of the room, and so that room would not be part of the
+        "current memberships" of the user.
+
+        Returns:
+            A 2-tuple of newly joined room IDs and a map of newly_left room
+            IDs to the `RoomsForUserStateReset` entry.
+
+            We're using `RoomsForUserStateReset` but that doesn't necessarily mean the
+            user was state reset out of the rooms. It's just that the `event_id`/`sender`
+            are optional and we can't tell the difference between the server leaving the
+            room (because the user was the last person participating in the room and
+            left) and the user being state reset out of the room. To actually check for
+            a state reset, you need to check if a membership still exists in the room.
+        """
+        newly_joined_room_ids: Set[str] = set()
+        newly_left_room_map: Dict[str, RoomsForUserStateReset] = {}
+
+        # We need to figure out two things:
+        #
+        # - 1) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`)
+        # - 2) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`)
+
+        # 1) Fetch membership changes that fall in the range from `from_token` up to `to_token`
+        current_state_delta_membership_changes_in_from_to_range = []
+        if from_token:
+            current_state_delta_membership_changes_in_from_to_range = (
+                await self.store.get_current_state_delta_membership_changes_for_user(
+                    user_id,
+                    from_key=from_token.room_key,
+                    to_key=to_token.room_key,
+                    excluded_room_ids=self.rooms_to_exclude_globally,
+                )
+            )
+
+        # 1) Assemble a map of each room's last membership event in the given range.
+        # Someone could have left and joined multiple times during the range but we
+        # only care about the end result, so we grab the last one.
+        last_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # We also want to assemble a list of the first membership events during the token
+        # range so we can step backward to the membership just before the token range
+        # and see whether we have `newly_joined` the room.
+        first_membership_change_by_room_id_in_from_to_range: Dict[
+            str, CurrentStateDeltaMembership
+        ] = {}
+        # Keep track of whether the room has a non-join event in the token range so we
+        # can later tell if it was a `newly_joined` room. If the last membership event
+        # in the token range is a join and there is also some non-join in the range, we
+        # know they `newly_joined`.
+        has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {}
+        for (
+            membership_change
+        ) in current_state_delta_membership_changes_in_from_to_range:
+            room_id = membership_change.room_id
+
+            last_membership_change_by_room_id_in_from_to_range[room_id] = (
+                membership_change
+            )
+            # Only set if we haven't already set it
+            first_membership_change_by_room_id_in_from_to_range.setdefault(
+                room_id, membership_change
+            )
+
+            if membership_change.membership != Membership.JOIN:
+                has_non_join_event_by_room_id_in_from_to_range[room_id] = True
+
+        # 1) Fixup
+        #
+        # 2) We also want to assemble a list of possibly newly joined rooms. Someone
+        # could have left and joined multiple times during the given range but we only
+        # care about whether they are joined at the end of the token range so we are
+        # working with the last membership event in the token range.
+        possibly_newly_joined_room_ids = set()
+        for (
+            last_membership_change_in_from_to_range
+        ) in last_membership_change_by_room_id_in_from_to_range.values():
+            room_id = last_membership_change_in_from_to_range.room_id
+
+            # 2)
+            if last_membership_change_in_from_to_range.membership == Membership.JOIN:
+                possibly_newly_joined_room_ids.add(room_id)
+
+            # 1) Figure out newly_left rooms (> `from_token` and <= `to_token`).
+            if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
+                # 1) Mark this room as `newly_left`
+                newly_left_room_map[room_id] = RoomsForUserStateReset(
+                    room_id=room_id,
+                    sender=last_membership_change_in_from_to_range.sender,
+                    membership=Membership.LEAVE,
+                    event_id=last_membership_change_in_from_to_range.event_id,
+                    event_pos=last_membership_change_in_from_to_range.event_pos,
+                    room_version_id=await self.store.get_room_version_id(room_id),
+                )
+
+        # 2) Figure out `newly_joined`
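+        #
+        # For example (illustrative): a leave followed by a join within the range means
+        # they `newly_joined`; a lone join whose previous membership (from before the
+        # range) was already a join means they were joined the whole time and are not
+        # `newly_joined`.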
+        for room_id in possibly_newly_joined_room_ids:
+            has_non_join_in_from_to_range = (
+                has_non_join_event_by_room_id_in_from_to_range.get(room_id, False)
+            )
+            # If the last membership event in the token range is a join and there is
+            # also some non-join in the range, we know they `newly_joined`.
+            if has_non_join_in_from_to_range:
+                # We found a `newly_joined` room (we left and joined within the token range)
+                newly_joined_room_ids.add(room_id)
+            else:
+                prev_event_id = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_event_id
+                prev_membership = first_membership_change_by_room_id_in_from_to_range[
+                    room_id
+                ].prev_membership
+
+                if prev_event_id is None:
+                    # We found a `newly_joined` room (we are joining the room for the
+                    # first time within the token range)
+                    newly_joined_room_ids.add(room_id)
+                # Last resort, we need to step back to the previous membership event
+                # just before the token range to see if we're joined then or not.
+                elif prev_membership != Membership.JOIN:
+                    # We found a `newly_joined` room (we left before the token range
+                    # and joined within the token range)
+                    newly_joined_room_ids.add(room_id)
+
+        return newly_joined_room_ids, newly_left_room_map
+
+    @trace
+    async def _get_dm_rooms_for_user(
+        self,
+        user_id: str,
+    ) -> AbstractSet[str]:
+        """Get the set of DM rooms for the user."""
+
+        # We're using global account data (`m.direct`) instead of checking for
+        # `is_direct` on membership events because that property only appears for
+        # the invitee membership event (doesn't show up for the inviter).
+        #
+        # We're unable to take `to_token` into account for global account data since
+        # we only keep track of the latest account data for the user.
+        dm_map = await self.store.get_global_account_data_by_type_for_user(
+            user_id, AccountDataTypes.DIRECT
+        )
+
+        # Flatten out the map. Account data is set by the client so it needs to be
+        # scrutinized.
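+        #
+        # For example (illustrative), `m.direct` content of the form
+        # {"@alice:example.org": ["!dm1:example.org", "!dm2:example.org"]} flattens to
+        # the set {"!dm1:example.org", "!dm2:example.org"}.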
+        dm_room_id_set = set()
+        if isinstance(dm_map, dict):
+            for room_ids in dm_map.values():
+                # Account data should be a list of room IDs. Ignore anything else
+                if isinstance(room_ids, list):
+                    for room_id in room_ids:
+                        if isinstance(room_id, str):
+                            dm_room_id_set.add(room_id)
+
+        return dm_room_id_set
+
+    @trace
+    async def filter_rooms_relevant_for_sync(
+        self,
+        user: UserID,
+        room_membership_for_user_map: Dict[str, RoomsForUserType],
+        newly_left_room_ids: AbstractSet[str],
+    ) -> Dict[str, RoomsForUserType]:
+        """
+        Filter room IDs that should/can be listed for this user in the sync response (the
+        full room list that will be further filtered, sorted, and sliced).
+
+        We're looking for rooms where the user has the following state in the token
+        range (> `from_token` and <= `to_token`):
+
+        - `invite`, `join`, `knock`, `ban` membership events
+        - Kicks (`leave` membership events where `sender` is different from the
+          `user_id`/`state_key`)
+        - `newly_left` (rooms that were left during the given token range)
+        - In order for bans/kicks to not show up in sync, you need to `/forget` those
+          rooms. This doesn't modify the event itself though and only adds the
+          `forgotten` flag to the `room_memberships` table in Synapse. There currently
+          isn't a way to tell when a room was forgotten, so we can't factor it into the
+          from/to range.
+
+        Args:
+            user: User that is syncing
+            room_membership_for_user_map: Room membership for the user
+            newly_left_room_ids: The set of room IDs we have newly left
+
+        Returns:
+            A dictionary of room IDs that should be listed in the sync response along
+            with membership information in that room at the time of `to_token`.
+        """
+        user_id = user.to_string()
+
+        # Filter rooms to only what we're interested to sync with
+        filtered_sync_room_map = {
+            room_id: room_membership_for_user
+            for room_id, room_membership_for_user in room_membership_for_user_map.items()
+            if filter_membership_for_sync(
+                user_id=user_id,
+                room_membership_for_user=room_membership_for_user,
+                newly_left=room_id in newly_left_room_ids,
+            )
+        }
+
+        return filtered_sync_room_map
+
+    async def check_room_subscription_allowed_for_user(
+        self,
+        room_id: str,
+        room_membership_for_user_map: Dict[str, RoomsForUserType],
+        to_token: StreamToken,
+    ) -> Optional[RoomsForUserType]:
+        """
+        Check whether the user is allowed to see the room based on whether they have
+        ever had membership in the room or if the room is `world_readable`.
+
+        Similar to `check_user_in_room_or_world_readable(...)`
+
+        Args:
+            room_id: Room to check
+            room_membership_for_user_map: Room membership for the user at the time of
+                the `to_token` (<= `to_token`).
+            to_token: The token to fetch rooms up to.
+
+        Returns:
+            The room membership for the user if they are allowed to subscribe to the
+            room else `None`.
+        """
+
+        # We can first check if they are already allowed to see the room based
+        # on our previous work to assemble the `room_membership_for_user_map`.
+        #
+        # If they have had any membership in the room over time (up to the `to_token`),
+        # let them subscribe and see what they can.
+        existing_membership_for_user = room_membership_for_user_map.get(room_id)
+        if existing_membership_for_user is not None:
+            return existing_membership_for_user
+
+        # TODO: Handle `world_readable` rooms
+        return None
+
+        # If the room is `world_readable`, it doesn't matter whether they can join,
+        # everyone can see the room.
+        # not_in_room_membership_for_user = _RoomMembershipForUser(
+        #     room_id=room_id,
+        #     event_id=None,
+        #     event_pos=None,
+        #     membership=None,
+        #     sender=None,
+        #     newly_joined=False,
+        #     newly_left=False,
+        #     is_dm=False,
+        # )
+        # room_state = await self.get_current_state_at(
+        #     room_id=room_id,
+        #     room_membership_for_user_at_to_token=not_in_room_membership_for_user,
+        #     state_filter=StateFilter.from_types(
+        #         [(EventTypes.RoomHistoryVisibility, "")]
+        #     ),
+        #     to_token=to_token,
+        # )
+
+        # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, ""))
+        # if (
+        #     visibility_event is not None
+        #     and visibility_event.content.get("history_visibility")
+        #     == HistoryVisibility.WORLD_READABLE
+        # ):
+        #     return not_in_room_membership_for_user
+
+        # return None
+
+    @trace
+    async def _bulk_get_stripped_state_for_rooms_from_sync_room_map(
+        self,
+        room_ids: StrCollection,
+        sync_room_map: Dict[str, RoomsForUserType],
+    ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]:
+        """
+        Fetch stripped state for a list of room IDs. Stripped state is only
+        applicable to invite/knock rooms. Other rooms will have `None` as their
+        stripped state.
+
+        For invite rooms, we pull from `unsigned.invite_room_state`.
+        For knock rooms, we pull from `unsigned.knock_room_state`.
+
+        Args:
+            room_ids: Room IDs to fetch stripped state for
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+
+        Returns:
+            Mapping from room_id to mapping of (type, state_key) to stripped state
+            event.
+        """
+        room_id_to_stripped_state_map: Dict[
+            str, Optional[StateMap[StrippedStateEvent]]
+        ] = {}
+
+        # Fetch what we haven't before
+        room_ids_to_fetch = [
+            room_id
+            for room_id in room_ids
+            if room_id not in room_id_to_stripped_state_map
+        ]
+
+        # Gather a list of event IDs we can grab stripped state from
+        invite_or_knock_event_ids: List[str] = []
+        for room_id in room_ids_to_fetch:
+            if sync_room_map[room_id].membership in (
+                Membership.INVITE,
+                Membership.KNOCK,
+            ):
+                event_id = sync_room_map[room_id].event_id
+                # If this is an invite/knock then there should be an event_id
+                assert event_id is not None
+                invite_or_knock_event_ids.append(event_id)
+            else:
+                room_id_to_stripped_state_map[room_id] = None
+
+        invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids)
+        for invite_or_knock_event in invite_or_knock_events.values():
+            room_id = invite_or_knock_event.room_id
+            membership = invite_or_knock_event.membership
+
+            raw_stripped_state_events = None
+            if membership == Membership.INVITE:
+                invite_room_state = invite_or_knock_event.unsigned.get(
+                    "invite_room_state"
+                )
+                raw_stripped_state_events = invite_room_state
+            elif membership == Membership.KNOCK:
+                knock_room_state = invite_or_knock_event.unsigned.get(
+                    "knock_room_state"
+                )
+                raw_stripped_state_events = knock_room_state
+            else:
+                raise AssertionError(
+                    f"Unexpected membership {membership} (this is a problem with Synapse itself)"
+                )
+
+            stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None
+            # Scrutinize unsigned things. `raw_stripped_state_events` should be a list
+            # of stripped events
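+            # (each one roughly of the shape
+            # {"type": ..., "state_key": ..., "sender": ..., "content": {...}})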
+            if raw_stripped_state_events is not None:
+                stripped_state_map = {}
+                if isinstance(raw_stripped_state_events, list):
+                    for raw_stripped_event in raw_stripped_state_events:
+                        stripped_state_event = parse_stripped_state_event(
+                            raw_stripped_event
+                        )
+                        if stripped_state_event is not None:
+                            stripped_state_map[
+                                (
+                                    stripped_state_event.type,
+                                    stripped_state_event.state_key,
+                                )
+                            ] = stripped_state_event
+
+            room_id_to_stripped_state_map[room_id] = stripped_state_map
+
+        return room_id_to_stripped_state_map
+
+    @trace
+    async def _bulk_get_partial_current_state_content_for_rooms(
+        self,
+        content_type: Literal[
+            # `content.type` from `EventTypes.Create`
+            "room_type",
+            # `content.algorithm` from `EventTypes.RoomEncryption`
+            "room_encryption",
+        ],
+        room_ids: Set[str],
+        sync_room_map: Dict[str, RoomsForUserType],
+        to_token: StreamToken,
+        room_id_to_stripped_state_map: Dict[
+            str, Optional[StateMap[StrippedStateEvent]]
+        ],
+    ) -> Mapping[str, Union[Optional[str], StateSentinel]]:
+        """
+        Get the given state event content for a list of rooms. First we check the
+        current state of the room, then fallback to stripped state if available, then
+        historical state.
+
+        Args:
+            content_type: Which content to grab
+            room_ids: Room IDs to fetch the given content field for.
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            to_token: We filter based on the state of the room at this token
+            room_id_to_stripped_state_map: This does not need to be filled in before
+                calling this function. Mapping from room_id to mapping of (type, state_key)
+                to stripped state event. Modified in place when we fetch new rooms so we can
+                save work next time this function is called.
+
+        Returns:
+            A mapping from room ID to the state event content if the room has
+            the given state event (event_type, ""), otherwise `None`. Rooms unknown to
+            this server will return `ROOM_UNKNOWN_SENTINEL`.
+        """
+        room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {}
+
+        # As a bulk shortcut, use the current state if the server is participating in the
+        # room (meaning we have current state). Ideally, for leave/ban rooms, we would
+        # want the state at the time of the membership instead of current state to not
+        # leak anything but we consider the create/encryption stripped state events to
+        # not be a secret given they are often set at the start of the room and they are
+        # normally handed out on invite/knock.
+        #
+        # Be mindful to only use this for non-sensitive details. For example, even
+        # though the room name/avatar/topic are also stripped state, they seem a lot
+        # more sensitive to leak the current state value of.
+        #
+        # Since the bulk storage functions below are cached (so their results may be
+        # shared), we need to make a mutable copy via `dict(...)`.
+        event_type = ""
+        event_content_field = ""
+        if content_type == "room_type":
+            event_type = EventTypes.Create
+            event_content_field = EventContentFields.ROOM_TYPE
+            room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids))
+        elif content_type == "room_encryption":
+            event_type = EventTypes.RoomEncryption
+            event_content_field = EventContentFields.ENCRYPTION_ALGORITHM
+            room_id_to_content = dict(
+                await self.store.bulk_get_room_encryption(room_ids)
+            )
+        else:
+            assert_never(content_type)
+
+        room_ids_with_results = [
+            room_id
+            for room_id, content_field in room_id_to_content.items()
+            if content_field is not ROOM_UNKNOWN_SENTINEL
+        ]
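+        # Rooms the server has no current state for come back as
+        # `ROOM_UNKNOWN_SENTINEL`; below we try to fill those in from stripped state
+        # and then from historical state.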
+
+        # We might not have current room state for remote invite/knocks if we are
+        # the first person on our server to see the room. The best we can do is look
+        # in the optional stripped state from the invite/knock event.
+        room_ids_without_results = room_ids.difference(
+            chain(
+                room_ids_with_results,
+                [
+                    room_id
+                    for room_id, stripped_state_map in room_id_to_stripped_state_map.items()
+                    if stripped_state_map is not None
+                ],
+            )
+        )
+        room_id_to_stripped_state_map.update(
+            await self._bulk_get_stripped_state_for_rooms_from_sync_room_map(
+                room_ids_without_results, sync_room_map
+            )
+        )
+
+        # Update our `room_id_to_content` map based on the stripped state
+        # (applies to invite/knock rooms)
+        room_ids_without_stripped_state: Set[str] = set()
+        for room_id in room_ids_without_results:
+            stripped_state_map = room_id_to_stripped_state_map.get(
+                room_id, Sentinel.UNSET_SENTINEL
+            )
+            assert stripped_state_map is not Sentinel.UNSET_SENTINEL, (
+                f"Stripped state left unset for room {room_id}. "
+                + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` "
+                + "with that room_id. (this is a problem with Synapse itself)"
+            )
+
+            # If there is some stripped state, we assume the remote server passed *all*
+            # of the potential stripped state events for the room.
+            if stripped_state_map is not None:
+                create_stripped_event = stripped_state_map.get((EventTypes.Create, ""))
+                stripped_event = stripped_state_map.get((event_type, ""))
+                # Sanity check that we at least have the create event
+                if create_stripped_event is not None:
+                    if stripped_event is not None:
+                        room_id_to_content[room_id] = stripped_event.content.get(
+                            event_content_field
+                        )
+                    else:
+                        # Didn't see the state event we're looking for in the stripped
+                        # state so we can assume the relevant content field is `None`.
+                        room_id_to_content[room_id] = None
+            else:
+                room_ids_without_stripped_state.add(room_id)
+
+        # Last resort, we might not have current room state for rooms that the
+        # server has left (no one local is in the room) but we can look at the
+        # historical state.
+        #
+        # Update our `room_id_to_content` map based on the state at the time of
+        # the membership event.
+        for room_id in room_ids_without_stripped_state:
+            # TODO: It would be nice to look this up in a bulk way (N+1 queries)
+            #
+            # TODO: `get_state_at(...)` doesn't take into account the "current state".
+            room_state = await self.storage_controllers.state.get_state_at(
+                room_id=room_id,
+                stream_position=to_token.copy_and_replace(
+                    StreamKeyType.ROOM,
+                    sync_room_map[room_id].event_pos.to_room_stream_token(),
+                ),
+                state_filter=StateFilter.from_types(
+                    [
+                        (EventTypes.Create, ""),
+                        (event_type, ""),
+                    ]
+                ),
+                # Partially-stated rooms should have all state events except for
+                # remote membership events so we don't need to wait at all because
+                # we only want the create event and some non-member event.
+                await_full_state=False,
+            )
+            # We can use the create event as a canary to tell whether the server has
+            # seen the room before
+            create_event = room_state.get((EventTypes.Create, ""))
+            state_event = room_state.get((event_type, ""))
+
+            if create_event is None:
+                # Skip for unknown rooms
+                continue
+
+            if state_event is not None:
+                room_id_to_content[room_id] = state_event.content.get(
+                    event_content_field
+                )
+            else:
+                # Didn't see the state event we're looking for in the historical
+                # state so we can assume the relevant content field is `None`.
+                room_id_to_content[room_id] = None
+
+        return room_id_to_content
+
+    @trace
+    async def filter_rooms(
+        self,
+        user: UserID,
+        sync_room_map: Dict[str, RoomsForUserType],
+        previous_connection_state: PerConnectionState,
+        filters: SlidingSyncConfig.SlidingSyncList.Filters,
+        to_token: StreamToken,
+        dm_room_ids: AbstractSet[str],
+    ) -> Dict[str, RoomsForUserType]:
+        """
+        Filter rooms based on the sync request.
+
+        Args:
+            user: User to filter rooms for
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            filters: Filters to apply
+            to_token: We filter based on the state of the room at this token
+            dm_room_ids: Set of room IDs that are DMs for the user
+
+        Returns:
+            A filtered dictionary of room IDs along with membership information in the
+            room at the time of `to_token`.
+        """
+        user_id = user.to_string()
+
+        room_id_to_stripped_state_map: Dict[
+            str, Optional[StateMap[StrippedStateEvent]]
+        ] = {}
+
+        filtered_room_id_set = set(sync_room_map.keys())
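+        # Each filter below narrows `filtered_room_id_set`, so multiple filters
+        # combine with AND semantics.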
+
+        # Filter for Direct-Message (DM) rooms
+        if filters.is_dm is not None:
+            with start_active_span("filters.is_dm"):
+                if filters.is_dm:
+                    # Only DM rooms please
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        if room_id in dm_room_ids
+                    }
+                else:
+                    # Only non-DM rooms please
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        if room_id not in dm_room_ids
+                    }
+
+        if filters.spaces is not None:
+            with start_active_span("filters.spaces"):
+                raise NotImplementedError()
+
+        # Filter for encrypted rooms
+        if filters.is_encrypted is not None:
+            with start_active_span("filters.is_encrypted"):
+                room_id_to_encryption = (
+                    await self._bulk_get_partial_current_state_content_for_rooms(
+                        content_type="room_encryption",
+                        room_ids=filtered_room_id_set,
+                        to_token=to_token,
+                        sync_room_map=sync_room_map,
+                        room_id_to_stripped_state_map=room_id_to_stripped_state_map,
+                    )
+                )
+
+                # Make a copy so we don't run into an error: `Set changed size during
+                # iteration`, when we filter out and remove items
+                for room_id in filtered_room_id_set.copy():
+                    encryption = room_id_to_encryption.get(
+                        room_id, ROOM_UNKNOWN_SENTINEL
+                    )
+
+                    # Just remove rooms if we can't determine their encryption status
+                    if encryption is ROOM_UNKNOWN_SENTINEL:
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+                    # If we're looking for encrypted rooms, filter out rooms that are not
+                    # encrypted and vice versa
+                    is_encrypted = encryption is not None
+                    if (filters.is_encrypted and not is_encrypted) or (
+                        not filters.is_encrypted and is_encrypted
+                    ):
+                        filtered_room_id_set.remove(room_id)
+
+        # Filter for rooms that the user has been invited to
+        if filters.is_invite is not None:
+            with start_active_span("filters.is_invite"):
+                # Make a copy so we don't run into an error: `Set changed size during
+                # iteration`, when we filter out and remove items
+                for room_id in filtered_room_id_set.copy():
+                    room_for_user = sync_room_map[room_id]
+                    # If we're looking for invite rooms, filter out rooms that the user is
+                    # not invited to and vice versa
+                    if (
+                        filters.is_invite
+                        and room_for_user.membership != Membership.INVITE
+                    ) or (
+                        not filters.is_invite
+                        and room_for_user.membership == Membership.INVITE
+                    ):
+                        filtered_room_id_set.remove(room_id)
+
+        # Filter by room type (space vs room, etc). A room must match one of the types
+        # provided in the list. `None` is a valid type for rooms which do not have a
+        # room type.
+        if filters.room_types is not None or filters.not_room_types is not None:
+            with start_active_span("filters.room_types"):
+                room_id_to_type = (
+                    await self._bulk_get_partial_current_state_content_for_rooms(
+                        content_type="room_type",
+                        room_ids=filtered_room_id_set,
+                        to_token=to_token,
+                        sync_room_map=sync_room_map,
+                        room_id_to_stripped_state_map=room_id_to_stripped_state_map,
+                    )
+                )
+
+                # Make a copy so we don't run into an error: `Set changed size during
+                # iteration`, when we filter out and remove items
+                for room_id in filtered_room_id_set.copy():
+                    room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL)
+
+                    # Just remove rooms if we can't determine their type
+                    if room_type is ROOM_UNKNOWN_SENTINEL:
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+                    if (
+                        filters.room_types is not None
+                        and room_type not in filters.room_types
+                    ):
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+                    if (
+                        filters.not_room_types is not None
+                        and room_type in filters.not_room_types
+                    ):
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+        if filters.room_name_like is not None:
+            with start_active_span("filters.room_name_like"):
+                # TODO: The room name is a bit more sensitive to leak than the
+                # create/encryption event. Maybe we should consider a better way to fetch
+                # historical state before implementing this.
+                #
+                # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms(
+                #     content_type="room_name",
+                #     room_ids=filtered_room_id_set,
+                #     to_token=to_token,
+                #     sync_room_map=sync_room_map,
+                #     room_id_to_stripped_state_map=room_id_to_stripped_state_map,
+                # )
+                raise NotImplementedError()
+
+        # Filter by room tags according to the user's account data
+        if filters.tags is not None or filters.not_tags is not None:
+            with start_active_span("filters.tags"):
+                # Fetch the user tags for their rooms
+                room_tags = await self.store.get_tags_for_user(user_id)
+                room_id_to_tag_name_set: Dict[str, Set[str]] = {
+                    room_id: set(tags.keys()) for room_id, tags in room_tags.items()
+                }
+
+                if filters.tags is not None:
+                    tags_set = set(filters.tags)
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        # Remove rooms that don't have one of the tags in the filter
+                        if room_id_to_tag_name_set.get(room_id, set()).intersection(
+                            tags_set
+                        )
+                    }
+
+                if filters.not_tags is not None:
+                    not_tags_set = set(filters.not_tags)
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        # Remove rooms if they have any of the tags in the filter
+                        if not room_id_to_tag_name_set.get(room_id, set()).intersection(
+                            not_tags_set
+                        )
+                    }
+
+        # Keep rooms that the user has been state reset out of, if we previously sent
+        # them down the connection. We want to make sure that we send these down to
+        # the client regardless of filters so they find out about the state reset.
+        #
+        # We don't always have access to the state in a room after being state reset if
+        # no one else locally on the server is participating in the room so we patch
+        # these back in manually.
+        state_reset_out_of_room_id_set = {
+            room_id
+            for room_id in sync_room_map.keys()
+            if sync_room_map[room_id].event_id is None
+            and previous_connection_state.rooms.have_sent_room(room_id).status
+            != HaveSentRoomFlag.NEVER
+        }
+
+        # Assemble a new sync room map but only with the `filtered_room_id_set`
+        return {
+            room_id: sync_room_map[room_id]
+            for room_id in filtered_room_id_set | state_reset_out_of_room_id_set
+        }
+
+    @trace
+    async def filter_rooms_using_tables(
+        self,
+        user_id: str,
+        sync_room_map: Mapping[str, RoomsForUserSlidingSync],
+        previous_connection_state: PerConnectionState,
+        filters: SlidingSyncConfig.SlidingSyncList.Filters,
+        to_token: StreamToken,
+        dm_room_ids: AbstractSet[str],
+    ) -> Dict[str, RoomsForUserSlidingSync]:
+        """
+        Filter rooms based on the sync request.
+
+        Args:
+            user_id: ID of the user to filter rooms for
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            filters: Filters to apply
+            to_token: We filter based on the state of the room at this token
+            dm_room_ids: Set of room IDs which are DMs
+
+        Returns:
+            A filtered dictionary of room IDs along with membership information in the
+            room at the time of `to_token`.
+        """
+
+        filtered_room_id_set = set(sync_room_map.keys())
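+        # Each filter below narrows `filtered_room_id_set`. Unlike `filter_rooms(...)`,
+        # this variant reads precomputed per-room fields (`has_known_state`,
+        # `is_encrypted`, `room_type`) from the `RoomsForUserSlidingSync` entries
+        # instead of fetching room state.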
+
+        # Filter for Direct-Message (DM) rooms
+        if filters.is_dm is not None:
+            with start_active_span("filters.is_dm"):
+                if filters.is_dm:
+                    # Intersect with the DM room set
+                    filtered_room_id_set &= dm_room_ids
+                else:
+                    # Remove DMs
+                    filtered_room_id_set -= dm_room_ids
+
+        if filters.spaces is not None:
+            with start_active_span("filters.spaces"):
+                raise NotImplementedError()
+
+        # Filter for encrypted rooms
+        if filters.is_encrypted is not None:
+            filtered_room_id_set = {
+                room_id
+                for room_id in filtered_room_id_set
+                # Remove rooms if we can't figure out what the encryption status is
+                if sync_room_map[room_id].has_known_state
+                # Or remove if it doesn't match the filter
+                and sync_room_map[room_id].is_encrypted == filters.is_encrypted
+            }
+
+        # Filter for rooms that the user has been invited to
+        if filters.is_invite is not None:
+            with start_active_span("filters.is_invite"):
+                # Make a copy so we don't run into an error: `Set changed size during
+                # iteration`, when we filter out and remove items
+                for room_id in filtered_room_id_set.copy():
+                    room_for_user = sync_room_map[room_id]
+                    # If we're looking for invite rooms, filter out rooms that the user is
+                    # not invited to and vice versa
+                    if (
+                        filters.is_invite
+                        and room_for_user.membership != Membership.INVITE
+                    ) or (
+                        not filters.is_invite
+                        and room_for_user.membership == Membership.INVITE
+                    ):
+                        filtered_room_id_set.remove(room_id)
+
+        # Filter by room type (space vs room, etc). A room must match one of the types
+        # provided in the list. `None` is a valid type for rooms which do not have a
+        # room type.
+        if filters.room_types is not None or filters.not_room_types is not None:
+            with start_active_span("filters.room_types"):
+                # Make a copy so we don't run into an error: `Set changed size during
+                # iteration`, when we filter out and remove items
+                for room_id in filtered_room_id_set.copy():
+                    # Remove rooms if we can't figure out what room type it is
+                    if not sync_room_map[room_id].has_known_state:
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+                    room_type = sync_room_map[room_id].room_type
+
+                    if (
+                        filters.room_types is not None
+                        and room_type not in filters.room_types
+                    ):
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+                    if (
+                        filters.not_room_types is not None
+                        and room_type in filters.not_room_types
+                    ):
+                        filtered_room_id_set.remove(room_id)
+                        continue
+
+        if filters.room_name_like is not None:
+            with start_active_span("filters.room_name_like"):
+                # TODO: The room name is a bit more sensitive to leak than the
+                # create/encryption event. Maybe we should consider a better way to fetch
+                # historical state before implementing this.
+                #
+                # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms(
+                #     content_type="room_name",
+                #     room_ids=filtered_room_id_set,
+                #     to_token=to_token,
+                #     sync_room_map=sync_room_map,
+                #     room_id_to_stripped_state_map=room_id_to_stripped_state_map,
+                # )
+                raise NotImplementedError()
+
+        # Filter by room tags according to the user's account data
+        if filters.tags is not None or filters.not_tags is not None:
+            with start_active_span("filters.tags"):
+                # Fetch the user tags for their rooms
+                room_tags = await self.store.get_tags_for_user(user_id)
+                room_id_to_tag_name_set: Dict[str, Set[str]] = {
+                    room_id: set(tags.keys()) for room_id, tags in room_tags.items()
+                }
+
+                if filters.tags is not None:
+                    tags_set = set(filters.tags)
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        # Remove rooms that don't have one of the tags in the filter
+                        if room_id_to_tag_name_set.get(room_id, set()).intersection(
+                            tags_set
+                        )
+                    }
+
+                if filters.not_tags is not None:
+                    not_tags_set = set(filters.not_tags)
+                    filtered_room_id_set = {
+                        room_id
+                        for room_id in filtered_room_id_set
+                        # Remove rooms if they have any of the tags in the filter
+                        if not room_id_to_tag_name_set.get(room_id, set()).intersection(
+                            not_tags_set
+                        )
+                    }
+
+        # Keep rooms that the user has been state reset out of, if we previously sent
+        # them down the connection. We want to make sure that we send these down to
+        # the client regardless of filters so they find out about the state reset.
+        #
+        # We don't always have access to the state in a room after being state reset if
+        # no one else locally on the server is participating in the room so we patch
+        # these back in manually.
+        state_reset_out_of_room_id_set = {
+            room_id
+            for room_id in sync_room_map.keys()
+            if sync_room_map[room_id].event_id is None
+            and previous_connection_state.rooms.have_sent_room(room_id).status
+            != HaveSentRoomFlag.NEVER
+        }
+
+        # Assemble a new sync room map but only with the `filtered_room_id_set`
+        return {
+            room_id: sync_room_map[room_id]
+            for room_id in filtered_room_id_set | state_reset_out_of_room_id_set
+        }
+
+    @trace
+    async def sort_rooms(
+        self,
+        sync_room_map: Dict[str, RoomsForUserType],
+        to_token: StreamToken,
+        limit: Optional[int] = None,
+    ) -> List[RoomsForUserType]:
+        """
+        Sort by `stream_ordering` of the last event that the user should see in the
+        room. `stream_ordering` is unique so we get a stable sort.
+
+        If `limit` is specified then the returned list may contain fewer entries than
+        `sync_room_map`, but it will always include at least the top `limit` rooms.
+        This is useful as we don't always need to sort the full list, but are just
+        interested in the top N.
+
+        Args:
+            sync_room_map: Dictionary of room IDs to sort along with membership
+                information in the room at the time of `to_token`.
+            to_token: We sort based on the events in the room at this token (<= `to_token`)
+            limit: The number of rooms that we need to return from the top of the list.
+
+        Returns:
+            A sorted list of room IDs by `stream_ordering` along with membership information.
+        """
+
+        # Assemble a map of room ID to the `stream_ordering` of the last activity that the
+        # user should see in the room (<= `to_token`)
+        last_activity_in_room_map: Dict[str, int] = {}
+
+        # Same as above, except for positions that we know are in the event
+        # stream cache.
+        cached_positions: Dict[str, int] = {}
+
+        earliest_cache_position = (
+            self.store._events_stream_cache.get_earliest_known_position()
+        )
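+        # Positions above `earliest_cache_position` are guaranteed to be tracked by
+        # the stream change cache; anything at or below it may have been evicted, so
+        # we can only rely on the cache for activity newer than this position.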
+
+        for room_id, room_for_user in sync_room_map.items():
+            if room_for_user.membership == Membership.JOIN:
+                # For joined rooms check the stream change cache.
+                cached_position = (
+                    self.store._events_stream_cache.get_max_pos_of_last_change(room_id)
+                )
+                if cached_position is not None:
+                    cached_positions[room_id] = cached_position
+            else:
+                # If the user has left/been invited/knocked/been banned from a
+                # room, they shouldn't see anything past that point.
+                #
+                # FIXME: It's possible that people should see beyond this point
+                # in invited/knocked cases if for example the room has
+                # `invite`/`world_readable` history visibility, see
+                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
+                last_activity_in_room_map[room_id] = room_for_user.event_pos.stream
+
+                # If the stream position is in range of the stream change cache
+                # we can include it.
+                if room_for_user.event_pos.stream > earliest_cache_position:
+                    cached_positions[room_id] = room_for_user.event_pos.stream
+
+        # If we are only asked for the top N rooms, and we have enough from
+        # looking in the stream change cache, then we can return early. This
+        # is because the cache must include all entries above
+        # `.get_earliest_known_position()`.
+        if limit is not None and len(cached_positions) >= limit:
+            # ... but first we need to handle the case where the cached max
+            # position is greater than the to_token, in which case we do
+            # actually query the DB. This should happen rarely, so we can do it in
+            # a loop.
+            for room_id, position in list(cached_positions.items()):
+                if position > to_token.room_key.stream:
+                    result = await self.store.get_last_event_pos_in_room_before_stream_ordering(
+                        room_id, to_token.room_key
+                    )
+                    if (
+                        result is not None
+                        and result[1].stream > earliest_cache_position
+                    ):
+                        # We have a stream position in the cached range.
+                        cached_positions[room_id] = result[1].stream
+                    else:
+                        # No position in the range, so we remove the entry.
+                        cached_positions.pop(room_id)
+
+        if limit is not None and len(cached_positions) >= limit:
+            return sorted(
+                (
+                    room
+                    for room in sync_room_map.values()
+                    if room.room_id in cached_positions
+                ),
+                # Sort by the last activity (stream_ordering) in the room
+                key=lambda room_info: cached_positions[room_info.room_id],
+                # We want descending order
+                reverse=True,
+            )
+
+        # For fully-joined rooms, we find the latest activity at/before the
+        # `to_token`.
+        joined_room_positions = (
+            await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering(
+                [
+                    room_id
+                    for room_id, room_for_user in sync_room_map.items()
+                    if room_for_user.membership == Membership.JOIN
+                ],
+                to_token.room_key,
+            )
+        )
+
+        last_activity_in_room_map.update(joined_room_positions)
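+        # At this point `last_activity_in_room_map` should have an entry for every
+        # room in `sync_room_map`: left/invited/knocked/banned rooms were filled in
+        # from their membership event position above, and joined rooms from the bulk
+        # lookup we just did.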
+
+        return sorted(
+            sync_room_map.values(),
+            # Sort by the last activity (stream_ordering) in the room
+            key=lambda room_info: last_activity_in_room_map[room_info.room_id],
+            # We want descending order
+            reverse=True,
+        )
+
+    async def get_is_encrypted_for_room_at_token(
+        self, room_id: str, to_token: RoomStreamToken
+    ) -> bool:
+        """Get if the room is encrypted at the time."""
+
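+        # Strategy: start from the *current* encryption state event, then walk the
+        # state deltas from `to_token` up to now; the first encryption delta we find
+        # tells us (via its `prev_event_id`) which encryption event, if any, was in
+        # force at `to_token`.
+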
+        # Fetch the current encryption state
+        state_ids = await self.store.get_partial_filtered_current_state_ids(
+            room_id, StateFilter.from_types([(EventTypes.RoomEncryption, "")])
+        )
+        encryption_event_id = state_ids.get((EventTypes.RoomEncryption, ""))
+
+        # Now roll back the state by looking at the state deltas between
+        # to_token and now.
+        deltas = await self.store.get_current_state_deltas_for_room(
+            room_id,
+            from_token=to_token,
+            to_token=self.store.get_room_max_token(),
+        )
+
+        for delta in deltas:
+            if delta.event_type != EventTypes.RoomEncryption:
+                continue
+
+            # Found the first change; look at the previous event ID to get the
+            # state at the `to_token`.
+
+            if delta.prev_event_id is None:
+                # There is no prev event, so no encryption state event, so room is not encrypted
+                return False
+
+            encryption_event_id = delta.prev_event_id
+            break
+
+        # We didn't find an encryption state event, so the room isn't encrypted
+        if encryption_event_id is None:
+            return False
+
+        # We found encryption state, check if content has a non-null algorithm
+        encrypted_event = await self.store.get_event(encryption_event_id)
+        algorithm = encrypted_event.content.get(EventContentFields.ENCRYPTION_ALGORITHM)
+
+        return algorithm is not None
diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py
new file mode 100644
index 0000000000..d24fccf76f
--- /dev/null
+++ b/synapse/handlers/sliding_sync/store.py
@@ -0,0 +1,128 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, Optional
+
+import attr
+
+from synapse.logging.opentracing import trace
+from synapse.storage.databases.main import DataStore
+from synapse.types import SlidingSyncStreamToken
+from synapse.types.handlers.sliding_sync import (
+    MutablePerConnectionState,
+    PerConnectionState,
+    SlidingSyncConfig,
+)
+
+if TYPE_CHECKING:
+    pass
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(auto_attribs=True)
+class SlidingSyncConnectionStore:
+    """In-memory store of per-connection state, including what rooms we have
+    previously sent down a sliding sync connection.
+
+    Note: This is NOT safe to run in a worker setup because connection positions will
+    point to different sets of rooms on different workers. e.g. for the same connection,
+    a connection position of 5 might have totally different states on worker A and
+    worker B.
+
+    One complication that we need to deal with here is needing to handle requests being
+    resent, i.e. if we sent down a room in a response that the client never received, we
+    must consider the room *not* sent when we get the request again.
+
+    This is handled by using an integer "token", which is returned to the client
+    as part of the sync token. For each connection we store a mapping from
+    tokens to the room states, and create a new entry when we send down new
+    rooms.
+
+    Note that for any given sliding sync connection we will only store a maximum
+    of two different tokens: the previous token from the request and a new token
+    sent in the response. When we receive a request with a given token, we then
+    clear out all other entries with a different token.
+
+    Attributes:
+        store: The datastore used to read and persist the per-connection state
+            for each sliding sync connection.
+    """
+
+    store: "DataStore"
+
+    async def get_and_clear_connection_positions(
+        self,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken],
+    ) -> PerConnectionState:
+        """Fetch the per-connection state for the token.
+
+        Raises:
+            SlidingSyncUnknownPosition if the connection position is unknown
+        """
+        # If this is our first request, there is no previous connection state to fetch out of the database
+        if from_token is None or from_token.connection_position == 0:
+            return PerConnectionState()
+
+        conn_id = sync_config.conn_id or ""
+
+        device_id = sync_config.requester.device_id
+        assert device_id is not None
+
+        return await self.store.get_and_clear_connection_positions(
+            sync_config.user.to_string(),
+            device_id,
+            conn_id,
+            from_token.connection_position,
+        )
+
+    @trace
+    async def record_new_state(
+        self,
+        sync_config: SlidingSyncConfig,
+        from_token: Optional[SlidingSyncStreamToken],
+        new_connection_state: MutablePerConnectionState,
+    ) -> int:
+        """Record updated per-connection state, returning the connection
+        position associated with the new state.
+        If there are no changes to the state this may return the same token as
+        the existing per-connection state.
+        """
+        if not new_connection_state.has_updates():
+            if from_token is not None:
+                return from_token.connection_position
+            else:
+                return 0
+
+        # A from token with a zero connection position means there was no
+        # previously stored connection state, so we treat a zero the same as
+        # there being no previous position.
+        previous_connection_position = None
+        if from_token is not None and from_token.connection_position != 0:
+            previous_connection_position = from_token.connection_position
+
+        conn_id = sync_config.conn_id or ""
+
+        device_id = sync_config.requester.device_id
+        assert device_id is not None
+
+        return await self.store.persist_per_connection_state(
+            sync_config.user.to_string(),
+            device_id,
+            conn_id,
+            previous_connection_position,
+            new_connection_state,
+        )
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 1c94f3ca46..8f90c17060 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -293,7 +293,9 @@ class StatsHandler:
                     "history_visibility"
                 )
             elif delta.event_type == EventTypes.RoomEncryption:
-                room_state["encryption"] = event_content.get("algorithm")
+                room_state["encryption"] = event_content.get(
+                    EventContentFields.ENCRYPTION_ALGORITHM
+                )
             elif delta.event_type == EventTypes.Name:
                 room_state["name"] = event_content.get("name")
             elif delta.event_type == EventTypes.Topic:
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index de227faec3..f4ea90fbd7 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -43,6 +43,7 @@ from prometheus_client import Counter
 
 from synapse.api.constants import (
     AccountDataTypes,
+    Direction,
     EventContentFields,
     EventTypes,
     JoinRules,
@@ -64,6 +65,7 @@ from synapse.logging.opentracing import (
 )
 from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
 from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.databases.main.stream import PaginateFunction
 from synapse.storage.roommember import MemberSummary
 from synapse.types import (
     DeviceListUpdates,
@@ -84,7 +86,7 @@ from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
-from synapse.util.metrics import Measure, measure_func
+from synapse.util.metrics import Measure
 from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
@@ -181,10 +183,7 @@ class JoinedSyncResult:
         to tell if room needs to be part of the sync result.
         """
         return bool(
-            self.timeline
-            or self.state
-            or self.ephemeral
-            or self.account_data
+            self.timeline or self.state or self.ephemeral or self.account_data
             # nb the notification count does not, er, count: if there's nothing
             # else in the result, we don't need to send it.
         )
@@ -573,10 +572,10 @@ class SyncHandler:
         if timeout == 0 or since_token is None or full_state:
             # we are going to return immediately, so don't bother calling
             # notifier.wait_for_events.
-            result: Union[SyncResult, E2eeSyncResult] = (
-                await self.current_sync_for_user(
-                    sync_config, sync_version, since_token, full_state=full_state
-                )
+            result: Union[
+                SyncResult, E2eeSyncResult
+            ] = await self.current_sync_for_user(
+                sync_config, sync_version, since_token, full_state=full_state
             )
         else:
             # Otherwise, we wait for something to happen and report it to the user.
@@ -671,10 +670,10 @@ class SyncHandler:
 
             # Go through the `/sync` v2 path
             if sync_version == SyncVersion.SYNC_V2:
-                sync_result: Union[SyncResult, E2eeSyncResult] = (
-                    await self.generate_sync_result(
-                        sync_config, since_token, full_state
-                    )
+                sync_result: Union[
+                    SyncResult, E2eeSyncResult
+                ] = await self.generate_sync_result(
+                    sync_config, since_token, full_state
                 )
             # Go through the MSC3575 Sliding Sync `/sync/e2ee` path
             elif sync_version == SyncVersion.E2EE_SYNC:
@@ -879,22 +878,47 @@ class SyncHandler:
                 since_key = since_token.room_key
 
             while limited and len(recents) < timeline_limit and max_repeat:
-                # If we have a since_key then we are trying to get any events
-                # that have happened since `since_key` up to `end_key`, so we
-                # can just use `get_room_events_stream_for_room`.
-                # Otherwise, we want to return the last N events in the room
-                # in topological ordering.
-                if since_key:
-                    events, end_key = await self.store.get_room_events_stream_for_room(
-                        room_id,
-                        limit=load_limit + 1,
-                        from_key=since_key,
-                        to_key=end_key,
-                    )
-                else:
-                    events, end_key = await self.store.get_recent_events_for_room(
-                        room_id, limit=load_limit + 1, end_token=end_key
-                    )
+                # For initial `/sync`, we want to view a historical section of the
+                # timeline, so we fetch events by `topological_ordering` (the best
+                # representation of the room DAG as others were seeing it at the time).
+                # This also aligns with the order that `/messages` returns events in.
+                #
+                # For incremental `/sync`, we want to get all updates for rooms since
+                # the last `/sync` (regardless of whether those updates arrived late or
+                # happened a while ago in the past), so we fetch events by
+                # `stream_ordering` (in the order they were received by the server).
+                #
+                # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
+                #
+                # FIXME: Using workaround for mypy,
+                # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and
+                # https://github.com/python/mypy/issues/17479
+                paginate_room_events_by_topological_ordering: PaginateFunction = (
+                    self.store.paginate_room_events_by_topological_ordering
+                )
+                paginate_room_events_by_stream_ordering: PaginateFunction = (
+                    self.store.paginate_room_events_by_stream_ordering
+                )
+                pagination_method: PaginateFunction = (
+                    # Use `topological_ordering` for historical events
+                    paginate_room_events_by_topological_ordering
+                    if since_key is None
+                    # Use `stream_ordering` for updates
+                    else paginate_room_events_by_stream_ordering
+                )
+                events, end_key, limited = await pagination_method(
+                    room_id=room_id,
+                    # The bounds are reversed so we can paginate backwards
+                    # (from newer to older events) starting at `end_key`.
+                    # This ensures we fill the `limit` with the newest events first.
+                    from_key=end_key,
+                    to_key=since_key,
+                    direction=Direction.BACKWARDS,
+                    limit=load_limit,
+                )
+                # We want to return the events in ascending order (the last event is the
+                # most recent).
+                events.reverse()
 
                 log_kv({"loaded_recents": len(events)})
 
@@ -945,9 +969,6 @@ class SyncHandler:
                 loaded_recents.extend(recents)
                 recents = loaded_recents
 
-                if len(events) <= load_limit:
-                    limited = False
-                    break
                 max_repeat -= 1
 
             if len(recents) > timeline_limit:
@@ -1459,13 +1480,16 @@ class SyncHandler:
                     # timeline here. The caller will then dedupe any redundant
                     # ones.
 
-                    state_ids = await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[0].event_id,
-                        # we only want members!
-                        state_filter=StateFilter.from_types(
-                            (EventTypes.Member, member) for member in members_to_fetch
-                        ),
-                        await_full_state=False,
+                    state_ids = (
+                        await self._state_storage_controller.get_state_ids_for_event(
+                            batch.events[0].event_id,
+                            # we only want members!
+                            state_filter=StateFilter.from_types(
+                                (EventTypes.Member, member)
+                                for member in members_to_fetch
+                            ),
+                            await_full_state=False,
+                        )
                     )
             return state_ids
 
@@ -1750,8 +1774,15 @@ class SyncHandler:
                     )
 
                 if include_device_list_updates:
-                    device_lists = await self._generate_sync_entry_for_device_list(
-                        sync_result_builder,
+                    # include_device_list_updates can only be True if we have a
+                    # since token.
+                    assert since_token is not None
+
+                    device_lists = await self._device_handler.generate_sync_entry_for_device_list(
+                        user_id=user_id,
+                        since_token=since_token,
+                        now_token=sync_result_builder.now_token,
+                        joined_room_ids=sync_result_builder.joined_room_ids,
                         newly_joined_rooms=newly_joined_rooms,
                         newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
                         newly_left_rooms=newly_left_rooms,
@@ -1863,8 +1894,14 @@ class SyncHandler:
                 newly_left_users,
             ) = sync_result_builder.calculate_user_changes()
 
-            device_lists = await self._generate_sync_entry_for_device_list(
-                sync_result_builder,
+            # include_device_list_updates can only be True if we have a
+            # since token.
+            assert since_token is not None
+            device_lists = await self._device_handler.generate_sync_entry_for_device_list(
+                user_id=user_id,
+                since_token=since_token,
+                now_token=sync_result_builder.now_token,
+                joined_room_ids=sync_result_builder.joined_room_ids,
                 newly_joined_rooms=newly_joined_rooms,
                 newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
                 newly_left_rooms=newly_left_rooms,
@@ -2041,94 +2078,6 @@ class SyncHandler:
 
         return sync_result_builder
 
-    @measure_func("_generate_sync_entry_for_device_list")
-    async def _generate_sync_entry_for_device_list(
-        self,
-        sync_result_builder: "SyncResultBuilder",
-        newly_joined_rooms: AbstractSet[str],
-        newly_joined_or_invited_or_knocked_users: AbstractSet[str],
-        newly_left_rooms: AbstractSet[str],
-        newly_left_users: AbstractSet[str],
-    ) -> DeviceListUpdates:
-        """Generate the DeviceListUpdates section of sync
-
-        Args:
-            sync_result_builder
-            newly_joined_rooms: Set of rooms user has joined since previous sync
-            newly_joined_or_invited_or_knocked_users: Set of users that have joined,
-                been invited to a room or are knocking on a room since
-                previous sync.
-            newly_left_rooms: Set of rooms user has left since previous sync
-            newly_left_users: Set of users that have left a room we're in since
-                previous sync
-        """
-
-        user_id = sync_result_builder.sync_config.user.to_string()
-        since_token = sync_result_builder.since_token
-        assert since_token is not None
-
-        # Take a copy since these fields will be mutated later.
-        newly_joined_or_invited_or_knocked_users = set(
-            newly_joined_or_invited_or_knocked_users
-        )
-        newly_left_users = set(newly_left_users)
-
-        # We want to figure out what user IDs the client should refetch
-        # device keys for, and which users we aren't going to track changes
-        # for anymore.
-        #
-        # For the first step we check:
-        #   a. if any users we share a room with have updated their devices,
-        #      and
-        #   b. we also check if we've joined any new rooms, or if a user has
-        #      joined a room we're in.
-        #
-        # For the second step we just find any users we no longer share a
-        # room with by looking at all users that have left a room plus users
-        # that were in a room we've left.
-
-        users_that_have_changed = set()
-
-        joined_room_ids = sync_result_builder.joined_room_ids
-
-        # Step 1a, check for changes in devices of users we share a room
-        # with
-        users_that_have_changed = (
-            await self._device_handler.get_device_changes_in_shared_rooms(
-                user_id,
-                joined_room_ids,
-                from_token=since_token,
-                now_token=sync_result_builder.now_token,
-            )
-        )
-
-        # Step 1b, check for newly joined rooms
-        for room_id in newly_joined_rooms:
-            joined_users = await self.store.get_users_in_room(room_id)
-            newly_joined_or_invited_or_knocked_users.update(joined_users)
-
-        # TODO: Check that these users are actually new, i.e. either they
-        # weren't in the previous sync *or* they left and rejoined.
-        users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)
-
-        user_signatures_changed = await self.store.get_users_whose_signatures_changed(
-            user_id, since_token.device_list_key
-        )
-        users_that_have_changed.update(user_signatures_changed)
-
-        # Now find users that we no longer track
-        for room_id in newly_left_rooms:
-            left_users = await self.store.get_users_in_room(room_id)
-            newly_left_users.update(left_users)
-
-        # Remove any users that we still share a room with.
-        left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
-        for user_id, entries in left_users_rooms.items():
-            if any(rid in joined_room_ids for rid in entries):
-                newly_left_users.discard(user_id)
-
-        return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)
-
     @trace
     async def _generate_sync_entry_for_to_device(
         self, sync_result_builder: "SyncResultBuilder"
@@ -2212,18 +2161,18 @@ class SyncHandler:
 
             if push_rules_changed:
                 global_account_data = dict(global_account_data)
-                global_account_data[AccountDataTypes.PUSH_RULES] = (
-                    await self._push_rules_handler.push_rules_for_user(sync_config.user)
-                )
+                global_account_data[
+                    AccountDataTypes.PUSH_RULES
+                ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
         else:
             all_global_account_data = await self.store.get_global_account_data_for_user(
                 user_id
             )
 
             global_account_data = dict(all_global_account_data)
-            global_account_data[AccountDataTypes.PUSH_RULES] = (
-                await self._push_rules_handler.push_rules_for_user(sync_config.user)
-            )
+            global_account_data[
+                AccountDataTypes.PUSH_RULES
+            ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
 
         account_data_for_user = (
             await sync_config.filter_collection.filter_global_account_data(
@@ -2270,7 +2219,11 @@ class SyncHandler:
             user=user,
             from_key=presence_key,
             is_guest=sync_config.is_guest,
-            include_offline=include_offline,
+            include_offline=(
+                True
+                if self.hs_config.server.presence_include_offline_users_on_sync
+                else include_offline
+            ),
         )
         assert presence_key
         sync_result_builder.now_token = now_token.copy_and_replace(
@@ -2637,9 +2590,10 @@ class SyncHandler:
         # a "gap" in the timeline, as described by the spec for /sync.
         room_to_events = await self.store.get_room_events_stream_for_rooms(
             room_ids=sync_result_builder.joined_room_ids,
-            from_key=since_token.room_key,
-            to_key=now_token.room_key,
+            from_key=now_token.room_key,
+            to_key=since_token.room_key,
             limit=timeline_limit + 1,
+            direction=Direction.BACKWARDS,
         )
 
         # We loop through all room ids, even if there are no new events, in case
@@ -2649,7 +2603,10 @@ class SyncHandler:
 
             newly_joined = room_id in newly_joined_rooms
             if room_entry:
-                events, start_key = room_entry
+                events, start_key, _ = room_entry
+                # We want to return the events in ascending order (the last event is the
+                # most recent).
+                events.reverse()
 
                 prev_batch_token = now_token.copy_and_replace(
                     StreamKeyType.ROOM, start_key
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 4c87718337..8d693fee30 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -565,7 +565,12 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
         room_ids: Iterable[str],
         is_guest: bool,
         explicit_room_id: Optional[str] = None,
+        to_key: Optional[int] = None,
     ) -> Tuple[List[JsonMapping], int]:
+        """
+        Find typing notifications for the given rooms (> `from_key` and <= `to_key`)
+        """
+
         with Measure(self.clock, "typing.get_new_events"):
             from_key = int(from_key)
             handler = self.get_typing_handler()
@@ -574,7 +579,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
             for room_id in room_ids:
                 if room_id not in handler._room_serials:
                     continue
-                if handler._room_serials[room_id] <= from_key:
+                if handler._room_serials[room_id] <= from_key or (
+                    to_key is not None and handler._room_serials[room_id] > to_key
+                ):
                     continue
 
                 events.append(self._make_event_for(room_id))
diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py
index 7e578cf462..db998f6701 100644
--- a/synapse/handlers/worker_lock.py
+++ b/synapse/handlers/worker_lock.py
@@ -183,7 +183,7 @@ class WorkerLocksHandler:
             return
 
         def _wake_all_locks(
-            locks: Collection[Union[WaitingLock, WaitingMultiLock]]
+            locks: Collection[Union[WaitingLock, WaitingMultiLock]],
         ) -> None:
             for lock in locks:
                 deferred = lock.deferred
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 56ad28eabf..c3b2299c95 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -1039,7 +1039,7 @@ class _MultipartParserProtocol(protocol.Protocol):
         self.deferred = deferred
         self.boundary = boundary
         self.max_length = max_length
-        self.parser = None
+        self.parser: Optional[multipart.MultipartParser] = None
         self.multipart_response = MultipartResponse()
         self.has_redirect = False
         self.in_json = False
@@ -1057,11 +1057,11 @@ class _MultipartParserProtocol(protocol.Protocol):
         if not self.parser:
 
             def on_header_field(data: bytes, start: int, end: int) -> None:
-                if data[start:end] == b"Location":
+                if data[start:end].lower() == b"location":
                     self.has_redirect = True
-                if data[start:end] == b"Content-Disposition":
+                if data[start:end].lower() == b"content-disposition":
                     self.in_disposition = True
-                if data[start:end] == b"Content-Type":
+                if data[start:end].lower() == b"content-type":
                     self.in_content_type = True
 
             def on_header_value(data: bytes, start: int, end: int) -> None:
@@ -1088,7 +1088,6 @@ class _MultipartParserProtocol(protocol.Protocol):
                     return
                 # otherwise we are in the file part
                 else:
-                    logger.info("Writing multipart file data to stream")
                     try:
                         self.stream.write(data[start:end])
                     except Exception as e:
@@ -1098,7 +1097,7 @@ class _MultipartParserProtocol(protocol.Protocol):
                         self.deferred.errback()
                     self.file_length += end - start
 
-            callbacks = {
+            callbacks: "multipart.multipart.MultipartCallbacks" = {
                 "on_header_field": on_header_field,
                 "on_header_value": on_header_value,
                 "on_part_data": on_part_data,
@@ -1114,7 +1113,7 @@ class _MultipartParserProtocol(protocol.Protocol):
             self.transport.abortConnection()
 
         try:
-            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+            self.parser.write(incoming_data)
         except Exception as e:
             logger.warning(f"Exception writing to multipart parser: {e}")
             self.deferred.errback()
@@ -1314,6 +1313,5 @@ def is_unknown_endpoint(
         )
     ) or (
         # Older Synapses returned a 400 error.
-        e.code == 400
-        and synapse_error.errcode == Codes.UNRECOGNIZED
+        e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
     )
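# Editor's note on the header-matching fix above: multipart header names arrive
# with whatever casing the remote server chose, so the comparison is lowercased.
# A quick illustration:
for name in (b"Location", b"location", b"LOCATION"):
    assert name.lower() == b"location"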
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 749b01dd0e..e658c68e23 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -19,7 +19,6 @@
 #
 #
 import abc
-import cgi
 import codecs
 import logging
 import random
@@ -90,7 +89,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.types import JsonDict
 from synapse.util import json_decoder
-from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
+from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
 from synapse.util.metrics import Measure
 from synapse.util.stringutils import parse_and_validate_server_name
 
@@ -475,6 +474,8 @@ class MatrixFederationHttpClient:
             use_proxy=True,
         )
 
+        self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6)
+
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""
 
@@ -790,7 +791,7 @@ class MatrixFederationHttpClient:
                                 url_str,
                                 _flatten_response_never_received(e),
                             )
-                            body = None
+                            body = b""
 
                         exc = HttpResponseException(
                             response.code, response_phrase, body
@@ -1486,35 +1487,44 @@ class MatrixFederationHttpClient:
         )
 
         headers = dict(response.headers.getAllRawHeaders())
-
         expected_size = response.length
-        # if we don't get an expected length then use the max length
+
         if expected_size == UNKNOWN_LENGTH:
             expected_size = max_size
-            logger.debug(
-                f"File size unknown, assuming file is max allowable size: {max_size}"
-            )
+        else:
+            if int(expected_size) > max_size:
+                msg = "Requested file is too large > %r bytes" % (max_size,)
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
 
-        read_body, _ = await download_ratelimiter.can_do_action(
-            requester=None,
-            key=ip_address,
-            n_actions=expected_size,
-        )
-        if not read_body:
-            msg = "Requested file size exceeds ratelimits"
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
+            read_body, _ = await download_ratelimiter.can_do_action(
+                requester=None,
+                key=ip_address,
+                n_actions=expected_size,
             )
-            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+            if not read_body:
+                msg = "Requested file size exceeds ratelimits"
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(
+                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
+                )
 
         try:
-            # add a byte of headroom to max size as function errs at >=
-            d = read_body_with_max_size(response, output_stream, expected_size + 1)
-            d.addTimeout(self.default_timeout_seconds, self.reactor)
-            length = await make_deferred_yieldable(d)
+            async with self.remote_download_linearizer.queue(ip_address):
+                # add a byte of headroom to max size as function errs at >=
+                d = read_body_with_max_size(response, output_stream, expected_size + 1)
+                d.addTimeout(self.default_timeout_seconds, self.reactor)
+                length = await make_deferred_yieldable(d)
         except BodyExceededMaxSize:
             msg = "Requested file is too large > %r bytes" % (expected_size,)
             logger.warning(
@@ -1560,6 +1570,13 @@ class MatrixFederationHttpClient:
             request.method,
             request.uri.decode("ascii"),
         )
+
+        # if we didn't know the length upfront, record the actual size against the ratelimiter now
+        if response.length == UNKNOWN_LENGTH:
+            download_ratelimiter.record_action(
+                requester=None, key=ip_address, n_actions=length
+            )
+
         return length, headers
 
     async def federation_get_file(
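# Editor's sketch of the accounting pattern the hunks above introduce (the wrapper
# function and its names are illustrative, not Synapse APIs): when the remote
# advertises a length it is checked against max_size and the ratelimiter before
# reading; when the length is unknown the read is capped at max_size and the
# actual byte count is recorded afterwards.
async def account_for_remote_download(
    download_ratelimiter, ip_address: str, advertised_size, actual_size: int, max_size: int
) -> None:
    if advertised_size is not None:  # stand-in for "not UNKNOWN_LENGTH"
        if advertised_size > max_size:
            raise ValueError("Requested file is too large")
        allowed, _ = await download_ratelimiter.can_do_action(
            requester=None, key=ip_address, n_actions=advertised_size
        )
        if not allowed:
            raise ValueError("Requested file size exceeds ratelimits")
    else:
        # Only known once the body has actually been read.
        download_ratelimiter.record_action(
            requester=None, key=ip_address, n_actions=actual_size
        )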
@@ -1630,29 +1647,37 @@ class MatrixFederationHttpClient:
         )
 
         headers = dict(response.headers.getAllRawHeaders())
-
         expected_size = response.length
-        # if we don't get an expected length then use the max length
+
         if expected_size == UNKNOWN_LENGTH:
             expected_size = max_size
-            logger.debug(
-                f"File size unknown, assuming file is max allowable size: {max_size}"
-            )
+        else:
+            if int(expected_size) > max_size:
+                msg = "Requested file is too large > %r bytes" % (max_size,)
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
 
-        read_body, _ = await download_ratelimiter.can_do_action(
-            requester=None,
-            key=ip_address,
-            n_actions=expected_size,
-        )
-        if not read_body:
-            msg = "Requested file size exceeds ratelimits"
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
+            read_body, _ = await download_ratelimiter.can_do_action(
+                requester=None,
+                key=ip_address,
+                n_actions=expected_size,
             )
-            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+            if not read_body:
+                msg = "Requested file size exceeds ratelimits"
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(
+                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
+                )
 
         # this should be a multipart/mixed response with the boundary string in the header
         try:
@@ -1672,11 +1697,12 @@ class MatrixFederationHttpClient:
             raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
 
         try:
-            # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
-            deferred = read_multipart_response(
-                response, output_stream, boundary, expected_size + 1
-            )
-            deferred.addTimeout(self.default_timeout_seconds, self.reactor)
+            async with self.remote_download_linearizer.queue(ip_address):
+                # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
+                deferred = read_multipart_response(
+                    response, output_stream, boundary, expected_size + 1
+                )
+                deferred.addTimeout(self.default_timeout_seconds, self.reactor)
         except BodyExceededMaxSize:
             msg = "Requested file is too large > %r bytes" % (expected_size,)
             logger.warning(
@@ -1729,8 +1755,10 @@ class MatrixFederationHttpClient:
                 request.destination,
                 str_url,
             )
+            # We don't know how large the response will be upfront, so limit it to
+            # the `max_size` config value.
             length, headers, _, _ = await self._simple_http_client.get_file(
-                str_url, output_stream, expected_size
+                str_url, output_stream, max_size
             )
 
         logger.info(
@@ -1743,6 +1771,13 @@ class MatrixFederationHttpClient:
             request.method,
             request.uri.decode("ascii"),
         )
+
+        # if we didn't know the length upfront, record the actual size against the ratelimiter now
+        if response.length == UNKNOWN_LENGTH:
+            download_ratelimiter.record_action(
+                requester=None, key=ip_address, n_actions=length
+            )
+
         return length, headers, multipart_response.json
 
 
@@ -1777,8 +1812,9 @@ def check_content_type_is(headers: Headers, expected_content_type: str) -> None:
         )
 
     c_type = content_type_headers[0].decode("ascii")  # only the first header
-    val, options = cgi.parse_header(c_type)
-    if val != expected_content_type:
+    # Extract the 'essence' of the mimetype, removing any parameter
+    c_type_parsed = c_type.split(";", 1)[0].strip()
+    if c_type_parsed != expected_content_type:
         raise RequestSendFailed(
             RuntimeError(
                 f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'",
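# Editor's illustration of the cgi.parse_header replacement above: splitting on
# the first ";" and stripping whitespace yields the media type "essence", which
# is all check_content_type_is needs (parameters such as boundary are ignored).
c_type = 'multipart/mixed; boundary="gc0p4Jq0M2Yt08j"'
assert c_type.split(";", 1)[0].strip() == "multipart/mixed"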
diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py
index 5b5ded757b..97aa429e7d 100644
--- a/synapse/http/proxy.py
+++ b/synapse/http/proxy.py
@@ -62,6 +62,15 @@ HOP_BY_HOP_HEADERS = {
     "Upgrade",
 }
 
+if hasattr(Headers, "_canonicalNameCaps"):
+    # Twisted < 24.7.0rc1
+    _canonicalHeaderName = Headers()._canonicalNameCaps  # type: ignore[attr-defined]
+else:
+    # Twisted >= 24.7.0rc1
+    # But note that `_encodeName` still exists on prior versions;
+    # it just encodes differently there.
+    _canonicalHeaderName = Headers()._encodeName
+
 
 def parse_connection_header_value(
     connection_header_value: Optional[bytes],
@@ -85,11 +94,10 @@ def parse_connection_header_value(
         The set of header names that should not be copied over from the remote response.
         The keys are capitalized in canonical capitalization.
     """
-    headers = Headers()
     extra_headers_to_remove: Set[str] = set()
     if connection_header_value:
         extra_headers_to_remove = {
-            headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
+            _canonicalHeaderName(connection_option.strip()).decode("ascii")
             for connection_option in connection_header_value.split(b",")
         }
 
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 211795dc39..3e2d94d399 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -233,7 +233,7 @@ def return_html_error(
 
 
 def wrap_async_request_handler(
-    h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]]
+    h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]],
 ) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]:
     """Wraps an async request handler so that it calls request.processing.
 
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 08b8ff7afd..0330f1c878 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -37,19 +37,17 @@ from typing import (
     overload,
 )
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, MissingError, PydanticValueError, ValidationError
-    from pydantic.v1.error_wrappers import ErrorWrapper
-else:
-    from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError
-    from pydantic.error_wrappers import ErrorWrapper
-
 from typing_extensions import Literal
 
 from twisted.web.server import Request
 
+from synapse._pydantic_compat import (
+    BaseModel,
+    ErrorWrapper,
+    MissingError,
+    PydanticValueError,
+    ValidationError,
+)
 from synapse.api.errors import Codes, SynapseError
 from synapse.http import redact_uri
 from synapse.http.server import HttpServer
diff --git a/synapse/http/site.py b/synapse/http/site.py
index a5b5780679..8bf63edd36 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -658,7 +658,7 @@ class SynapseSite(ProxySite):
         )
 
         self.site_tag = site_tag
-        self.reactor = reactor
+        self.reactor: ISynapseReactor = reactor
 
         assert config.http_options is not None
         proxied = config.http_options.x_forwarded
@@ -683,7 +683,7 @@ class SynapseSite(ProxySite):
         self.access_logger = logging.getLogger(logger_name)
         self.server_version_string = server_version_string.encode("ascii")
 
-    def log(self, request: SynapseRequest) -> None:
+    def log(self, request: SynapseRequest) -> None:  # type: ignore[override]
         pass
 
 
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 6a6afbfc0b..d9ff70b252 100644
--- a/synapse/logging/_terse_json.py
+++ b/synapse/logging/_terse_json.py
@@ -22,6 +22,7 @@
 """
 Log formatters that output terse JSON.
 """
+
 import json
 import logging
 
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 4650b60962..ae2b3d11c0 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -20,7 +20,7 @@
 #
 #
 
-""" Thread-local-alike tracking of log contexts within synapse
+"""Thread-local-alike tracking of log contexts within synapse
 
 This module provides objects and utilities for tracking contexts through
 synapse code, so that log lines can include a request identifier, and so that
@@ -29,6 +29,7 @@ them.
 
 See doc/log_contexts.rst for details on how this works.
 """
+
 import logging
 import threading
 import typing
@@ -751,7 +752,7 @@ def preserve_fn(
     f: Union[
         Callable[P, R],
         Callable[P, Awaitable[R]],
-    ]
+    ],
 ) -> Callable[P, "defer.Deferred[R]"]:
     """Function decorator which wraps the function with run_in_background"""
 
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 7a3c805cc5..d976e58e49 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -169,6 +169,7 @@ Gotchas
   than one caller? Will all of those calling functions have be in a context
   with an active span?
 """
+
 import contextlib
 import enum
 import inspect
@@ -414,7 +415,7 @@ def ensure_active_span(
     """
 
     def ensure_active_span_inner_1(
-        func: Callable[P, R]
+        func: Callable[P, R],
     ) -> Callable[P, Union[Optional[T], R]]:
         @wraps(func)
         def ensure_active_span_inner_2(
@@ -700,7 +701,7 @@ def set_operation_name(operation_name: str) -> None:
 
 @only_if_tracing
 def force_tracing(
-    span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel
+    span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel,
 ) -> None:
     """Force sampling for the active/given span and its children.
 
@@ -1032,13 +1033,13 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
     def _wrapping_logic(
         _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
     ) -> Generator[None, None, None]:
-        # We use `[1:]` to skip the `self` object reference and `start=1` to
-        # make the index line up with `argspec.args`.
-        #
-        # FIXME: We could update this to handle any type of function by ignoring the
-        #   first argument only if it's named `self` or `cls`. This isn't fool-proof
-        #   but handles the idiomatic cases.
-        for i, arg in enumerate(args[1:], start=1):
+        for i, arg in enumerate(args, start=0):
+            if argspec.args[i] in ("self", "cls"):
+                # Ignore `self` and `cls` values. Ideally we'd properly detect
+                # if we were wrapping a method, but that is really non-trivial
+                # and this is good enough.
+                continue
+
             set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
         set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
         set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
@@ -1093,9 +1094,10 @@ def trace_servlet(
 
             # Mypy seems to think that start_context.tag below can be Optional[str], but
             # that doesn't appear to be correct and works in practice.
-            request_tags[
-                SynapseTags.REQUEST_TAG
-            ] = request.request_metrics.start_context.tag  # type: ignore[assignment]
+
+            request_tags[SynapseTags.REQUEST_TAG] = (
+                request.request_metrics.start_context.tag  # type: ignore[assignment]
+            )
 
             # set the tags *after* the servlet completes, in case it decided to
             # prioritise the span (tags will get dropped on unprioritised spans)
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 1b268ce4d4..7877df62fa 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -28,6 +28,7 @@ from types import TracebackType
 from typing import (
     TYPE_CHECKING,
     Awaitable,
+    BinaryIO,
     Dict,
     Generator,
     List,
@@ -37,21 +38,28 @@ from typing import (
 )
 
 import attr
+from zope.interface import implementer
 
+from twisted.internet import interfaces
+from twisted.internet.defer import Deferred
 from twisted.internet.interfaces import IConsumer
-from twisted.protocols.basic import FileSender
+from twisted.python.failure import Failure
 from twisted.web.server import Request
 
 from synapse.api.errors import Codes, cs_error
 from synapse.http.server import finish_request, respond_with_json
 from synapse.http.site import SynapseRequest
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import (
+    defer_to_threadpool,
+    make_deferred_yieldable,
+    run_in_background,
+)
 from synapse.util import Clock
+from synapse.util.async_helpers import DeferredEvent
 from synapse.util.stringutils import is_ascii
 
 if TYPE_CHECKING:
-    from synapse.storage.databases.main.media_repository import LocalMedia
-
+    from synapse.server import HomeServer
 
 logger = logging.getLogger(__name__)
 
@@ -122,6 +130,7 @@ def respond_404(request: SynapseRequest) -> None:
 
 
 async def respond_with_file(
+    hs: "HomeServer",
     request: SynapseRequest,
     media_type: str,
     file_path: str,
@@ -138,7 +147,7 @@ async def respond_with_file(
         add_file_headers(request, media_type, file_size, upload_name)
 
         with open(file_path, "rb") as f:
-            await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
+            await ThreadedFileSender(hs).beginFileTransfer(f, request)
 
         finish_request(request)
     else:
@@ -279,7 +288,9 @@ async def respond_with_multipart_responder(
     clock: Clock,
     request: SynapseRequest,
     responder: "Optional[Responder]",
-    media_info: "LocalMedia",
+    media_type: str,
+    media_length: Optional[int],
+    upload_name: Optional[str],
 ) -> None:
     """
     Responds to requests originating from the federation media `/download` endpoint by
@@ -303,7 +314,7 @@ async def respond_with_multipart_responder(
             )
             return
 
-        if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
+        if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
             disposition = "inline"
         else:
             disposition = "attachment"
@@ -311,16 +322,16 @@ async def respond_with_multipart_responder(
         def _quote(x: str) -> str:
             return urllib.parse.quote(x.encode("utf-8"))
 
-        if media_info.upload_name:
-            if _can_encode_filename_as_token(media_info.upload_name):
+        if upload_name:
+            if _can_encode_filename_as_token(upload_name):
                 disposition = "%s; filename=%s" % (
                     disposition,
-                    media_info.upload_name,
+                    upload_name,
                 )
             else:
                 disposition = "%s; filename*=utf-8''%s" % (
                     disposition,
-                    _quote(media_info.upload_name),
+                    _quote(upload_name),
                 )
 
         from synapse.media.media_storage import MultipartFileConsumer
@@ -330,14 +341,14 @@ async def respond_with_multipart_responder(
         multipart_consumer = MultipartFileConsumer(
             clock,
             request,
-            media_info.media_type,
+            media_type,
             {},
             disposition,
-            media_info.media_length,
+            media_length,
         )
 
         logger.debug("Responding to media request with responder %s", responder)
-        if media_info.media_length is not None:
+        if media_length is not None:
             content_length = multipart_consumer.content_length()
             assert content_length is not None
             request.setHeader(b"Content-Length", b"%d" % (content_length,))
@@ -601,3 +612,151 @@ def _parseparam(s: bytes) -> Generator[bytes, None, None]:
         f = s[:end]
         yield f.strip()
         s = s[end:]
+
+
+@implementer(interfaces.IPushProducer)
+class ThreadedFileSender:
+    """
+    A producer that sends the contents of a file to a consumer, reading from the
+    file on a thread.
+
+    This works by having a loop in a threadpool repeatedly reading from the
+    file, until the consumer pauses the producer. There is then a loop in the
+    main thread that waits until the consumer resumes the producer and then
+    starts reading in the threadpool again.
+
+    This is done to ensure that we're never waiting in the threadpool, as
+    otherwise it's easy to starve it of threads.
+    """
+
+    # How much data to read in one go.
+    CHUNK_SIZE = 2**14
+
+    # How long we wait for the consumer to be ready again before aborting the
+    # read.
+    TIMEOUT_SECONDS = 90.0
+
+    def __init__(self, hs: "HomeServer") -> None:
+        self.reactor = hs.get_reactor()
+        self.thread_pool = hs.get_media_sender_thread_pool()
+
+        self.file: Optional[BinaryIO] = None
+        self.deferred: "Deferred[None]" = Deferred()
+        self.consumer: Optional[interfaces.IConsumer] = None
+
+        # Signals if the thread should keep reading/sending data. Set means
+        # continue, clear means pause.
+        self.wakeup_event = DeferredEvent(self.reactor)
+
+        # Signals if the thread should terminate, e.g. because the consumer has
+        # gone away.
+        self.stop_writing = False
+
+    def beginFileTransfer(
+        self, file: BinaryIO, consumer: interfaces.IConsumer
+    ) -> "Deferred[None]":
+        """
+        Begin transferring a file
+        """
+        self.file = file
+        self.consumer = consumer
+
+        self.consumer.registerProducer(self, True)
+
+        # We set the wakeup signal as we should start producing immediately.
+        self.wakeup_event.set()
+        run_in_background(self.start_read_loop)
+
+        return make_deferred_yieldable(self.deferred)
+
+    def resumeProducing(self) -> None:
+        """interfaces.IPushProducer"""
+        self.wakeup_event.set()
+
+    def pauseProducing(self) -> None:
+        """interfaces.IPushProducer"""
+        self.wakeup_event.clear()
+
+    def stopProducing(self) -> None:
+        """interfaces.IPushProducer"""
+
+        # Unregister the consumer so we don't try and interact with it again.
+        if self.consumer:
+            self.consumer.unregisterProducer()
+
+        self.consumer = None
+
+        # Terminate the loop.
+        self.stop_writing = True
+        self.wakeup_event.set()
+
+        if not self.deferred.called:
+            self.deferred.errback(Exception("Consumer asked us to stop producing"))
+
+    async def start_read_loop(self) -> None:
+        """This is the loop that drives reading/writing"""
+        try:
+            while not self.stop_writing:
+                # Start the loop in the threadpool to read data.
+                more_data = await defer_to_threadpool(
+                    self.reactor, self.thread_pool, self._on_thread_read_loop
+                )
+                if not more_data:
+                    # Reached EOF, we can just return.
+                    return
+
+                if not self.wakeup_event.is_set():
+                    ret = await self.wakeup_event.wait(self.TIMEOUT_SECONDS)
+                    if not ret:
+                        raise Exception("Timed out waiting to resume")
+        except Exception:
+            self._error(Failure())
+        finally:
+            self._finish()
+
+    def _on_thread_read_loop(self) -> bool:
+        """This is the loop that happens on a thread.
+
+        Returns:
+            Whether there is more data to send.
+        """
+
+        while not self.stop_writing and self.wakeup_event.is_set():
+            # The file should always have been set before we get here.
+            assert self.file is not None
+
+            chunk = self.file.read(self.CHUNK_SIZE)
+            if not chunk:
+                return False
+
+            self.reactor.callFromThread(self._write, chunk)
+
+        return True
+
+    def _write(self, chunk: bytes) -> None:
+        """Called from the thread to write a chunk of data"""
+        if self.consumer:
+            self.consumer.write(chunk)
+
+    def _error(self, failure: Failure) -> None:
+        """Called when there was a fatal error"""
+        if self.consumer:
+            self.consumer.unregisterProducer()
+            self.consumer = None
+
+        if not self.deferred.called:
+            self.deferred.errback(failure)
+
+    def _finish(self) -> None:
+        """Called when we have finished writing (either on success or
+        failure)."""
+        if self.file:
+            self.file.close()
+            self.file = None
+
+        if self.consumer:
+            self.consumer.unregisterProducer()
+            self.consumer = None
+
+        if not self.deferred.called:
+            self.deferred.callback(None)
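# Editor's sketch of how the new producer is driven, mirroring the call sites
# added elsewhere in this diff (respond_with_file and FileResponder). `hs` is an
# existing HomeServer and `consumer` any IConsumer such as the request; the
# wrapper function is illustrative, not additional API.
async def stream_file_to(hs, file_path: str, consumer) -> None:
    # ThreadedFileSender is the class added just above.
    with open(file_path, "rb") as f:
        # Registers itself as a push producer on the consumer, reads the file in
        # 16 KiB chunks on the media-sender threadpool, and resolves once EOF is
        # reached or the consumer goes away.
        await ThreadedFileSender(hs).beginFileTransfer(f, consumer)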
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 542642b900..0b74209232 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -430,6 +430,7 @@ class MediaRepository:
         media_id: str,
         name: Optional[str],
         max_timeout_ms: int,
+        allow_authenticated: bool = True,
         federation: bool = False,
     ) -> None:
         """Responds to requests for local media, if exists, or returns 404.
@@ -442,6 +443,7 @@ class MediaRepository:
                 the filename in the Content-Disposition header of the response.
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
+            allow_authenticated: whether media marked as authenticated may be served to this request
             federation: whether the local media being fetched is for a federation request
 
         Returns:
@@ -451,6 +453,10 @@ class MediaRepository:
         if not media_info:
             return
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         self.mark_recently_accessed(None, media_id)
 
         media_type = media_info.media_type
@@ -465,7 +471,7 @@ class MediaRepository:
         responder = await self.media_storage.fetch_media(file_info)
         if federation:
             await respond_with_multipart_responder(
-                self.clock, request, responder, media_info
+                self.clock, request, responder, media_type, media_length, upload_name
             )
         else:
             await respond_with_responder(
@@ -481,6 +487,7 @@ class MediaRepository:
         max_timeout_ms: int,
         ip_address: str,
         use_federation_endpoint: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         """Respond to requests for remote media.
 
@@ -495,6 +502,8 @@ class MediaRepository:
             ip_address: the IP address of the requester
             use_federation_endpoint: whether to request the remote media over the new
                 federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -526,6 +535,7 @@ class MediaRepository:
                 self.download_ratelimiter,
                 ip_address,
                 use_federation_endpoint,
+                allow_authenticated,
             )
 
         # We deliberately stream the file outside the lock
@@ -542,7 +552,13 @@ class MediaRepository:
             respond_404(request)
 
     async def get_remote_media_info(
-        self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        ip_address: str,
+        use_federation: bool,
+        allow_authenticated: bool,
     ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
@@ -553,6 +569,10 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: IP address of the requester
+            use_federation: if a download is necessary, whether to request the remote file
+                over the federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             The media info of the file
@@ -573,7 +593,8 @@ class MediaRepository:
                 max_timeout_ms,
                 self.download_ratelimiter,
                 ip_address,
-                False,
+                use_federation,
+                allow_authenticated,
             )
 
         # Ensure we actually use the responder so that it releases resources
@@ -591,6 +612,7 @@ class MediaRepository:
         download_ratelimiter: Ratelimiter,
         ip_address: str,
         use_federation_endpoint: bool,
+        allow_authenticated: bool,
     ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
@@ -612,6 +634,11 @@ class MediaRepository:
         """
         media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            # if it isn't cached, don't fetch it; if it's authenticated, don't serve it
+            if not media_info or media_info.authenticated:
+                raise NotFoundError()
+
         # file_id is the ID we use to track the file locally. If we've already
         # seen the file then reuse the existing ID, otherwise generate a new
         # one.
@@ -785,6 +812,11 @@ class MediaRepository:
 
         logger.info("Stored remote media in file %r", fname)
 
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         return RemoteMedia(
             media_origin=server_name,
             media_id=media_id,
@@ -795,6 +827,7 @@ class MediaRepository:
             filesystem_id=file_id,
             last_access_ts=time_now_ms,
             quarantined_by=None,
+            authenticated=authenticated,
         )
 
     async def _federation_download_remote_file(
@@ -908,6 +941,11 @@ class MediaRepository:
 
         logger.debug("Stored remote media in file %r", fname)
 
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         return RemoteMedia(
             media_origin=server_name,
             media_id=media_id,
@@ -918,6 +956,7 @@ class MediaRepository:
             filesystem_id=file_id,
             last_access_ts=time_now_ms,
             quarantined_by=None,
+            authenticated=authenticated,
         )
 
     def _get_thumbnail_requirements(
@@ -969,7 +1008,7 @@ class MediaRepository:
         t_method: str,
         t_type: str,
         url_cache: bool,
-    ) -> Optional[str]:
+    ) -> Optional[Tuple[str, FileInfo]]:
         input_path = await self.media_storage.ensure_media_is_in_local_cache(
             FileInfo(None, media_id, url_cache=url_cache)
         )
@@ -1023,10 +1062,15 @@ class MediaRepository:
             t_len = os.path.getsize(output_path)
 
             await self.store.store_local_thumbnail(
-                media_id, t_width, t_height, t_type, t_method, t_len
+                media_id,
+                t_width,
+                t_height,
+                t_type,
+                t_method,
+                t_len,
             )
 
-            return output_path
+            return output_path, file_info
 
         # Could not generate thumbnail.
         return None
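# Editor's sketch consolidating the check repeated in this file and in
# thumbnailer.py below (an illustrative helper, not code from the diff): when
# authenticated media is enabled, media flagged as authenticated is withheld
# from requests that are not allowed to see it.
from synapse.api.errors import NotFoundError

def check_authenticated_media_allowed(hs, media_info, allow_authenticated: bool) -> None:
    if (
        hs.config.media.enable_authenticated_media
        and not allow_authenticated
        and media_info.authenticated
    ):
        raise NotFoundError()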
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index 2a106bb0eb..c25d1a9ba3 100644
--- a/synapse/media/media_storage.py
+++ b/synapse/media/media_storage.py
@@ -49,15 +49,11 @@ from zope.interface import implementer
 from twisted.internet import interfaces
 from twisted.internet.defer import Deferred
 from twisted.internet.interfaces import IConsumer
-from twisted.protocols.basic import FileSender
 
 from synapse.api.errors import NotFoundError
-from synapse.logging.context import (
-    defer_to_thread,
-    make_deferred_yieldable,
-    run_in_background,
-)
+from synapse.logging.context import defer_to_thread, run_in_background
 from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
+from synapse.media._base import ThreadedFileSender
 from synapse.util import Clock
 from synapse.util.file_consumer import BackgroundFileConsumer
 
@@ -213,7 +209,7 @@ class MediaStorage:
             local_path = os.path.join(self.local_media_directory, path)
             if os.path.exists(local_path):
                 logger.debug("responding with local file %s", local_path)
-                return FileResponder(open(local_path, "rb"))
+                return FileResponder(self.hs, open(local_path, "rb"))
             logger.debug("local file %s did not exist", local_path)
 
         for provider in self.storage_providers:
@@ -336,13 +332,12 @@ class FileResponder(Responder):
             is closed when finished streaming.
     """
 
-    def __init__(self, open_file: IO):
+    def __init__(self, hs: "HomeServer", open_file: BinaryIO):
+        self.hs = hs
         self.open_file = open_file
 
     def write_to_consumer(self, consumer: IConsumer) -> Deferred:
-        return make_deferred_yieldable(
-            FileSender().beginFileTransfer(self.open_file, consumer)
-        )
+        return ThreadedFileSender(self.hs).beginFileTransfer(self.open_file, consumer)
 
     def __exit__(
         self,
@@ -549,7 +544,7 @@ class MultipartFileConsumer:
         Calculate the content length of the multipart response
         in bytes.
         """
-        if not self.length:
+        if self.length is None:
             return None
         # calculate length of json field and content-type, disposition headers
         json_field = json.dumps(self.json_field)
diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py
index 06e5d27a53..300952025a 100644
--- a/synapse/media/storage_provider.py
+++ b/synapse/media/storage_provider.py
@@ -145,6 +145,7 @@ class FileStorageProviderBackend(StorageProvider):
 
     def __init__(self, hs: "HomeServer", config: str):
         self.hs = hs
+        self.reactor = hs.get_reactor()
         self.cache_directory = hs.config.media.media_store_path
         self.base_directory = config
 
@@ -165,7 +166,7 @@ class FileStorageProviderBackend(StorageProvider):
         shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
         with start_active_span("shutil_copyfile"):
             await defer_to_thread(
-                self.hs.get_reactor(),
+                self.reactor,
                 shutil_copyfile,
                 primary_fname,
                 backup_fname,
@@ -177,7 +178,7 @@ class FileStorageProviderBackend(StorageProvider):
 
         backup_fname = os.path.join(self.base_directory, path)
         if os.path.isfile(backup_fname):
-            return FileResponder(open(backup_fname, "rb"))
+            return FileResponder(self.hs, open(backup_fname, "rb"))
 
         return None
 
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index f8a9560784..3845067835 100644
--- a/synapse/media/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type
 
 from PIL import Image
 
-from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error
 from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
 from synapse.http.server import respond_with_json
 from synapse.http.site import SynapseRequest
@@ -36,9 +36,11 @@ from synapse.media._base import (
     ThumbnailInfo,
     respond_404,
     respond_with_file,
+    respond_with_multipart_responder,
     respond_with_responder,
 )
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import FileResponder, MediaStorage
+from synapse.storage.databases.main.media_repository import LocalMedia
 
 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -204,7 +206,7 @@ class Thumbnailer:
     def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO:
         output_bytes_io = BytesIO()
         fmt = self.FORMATS[output_type]
-        if fmt == "JPEG":
+        if fmt == "JPEG" or fmt == "PNG" and output_image.mode == "CMYK":
             output_image = output_image.convert("RGB")
         output_image.save(output_bytes_io, fmt, quality=80)
         return output_bytes_io
@@ -257,6 +259,7 @@ class ThumbnailProvider:
         media_storage: MediaStorage,
     ):
         self.hs = hs
+        self.reactor = hs.get_reactor()
         self.media_repo = media_repo
         self.media_storage = media_storage
         self.store = hs.get_datastores().main
@@ -271,6 +274,8 @@ class ThumbnailProvider:
         method: str,
         m_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -278,6 +283,12 @@ class ThumbnailProvider:
         if not media_info:
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
         await self._select_and_respond_with_thumbnail(
             request,
@@ -290,6 +301,8 @@ class ThumbnailProvider:
             media_id,
             url_cache=bool(media_info.url_cache),
             server_name=None,
+            for_federation=for_federation,
+            media_info=media_info,
         )
 
     async def select_or_generate_local_thumbnail(
@@ -301,14 +314,21 @@ class ThumbnailProvider:
         desired_method: str,
         desired_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
         )
-
         if not media_info:
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
         for info in thumbnail_infos:
             t_w = info.width == desired_width
@@ -326,15 +346,26 @@ class ThumbnailProvider:
 
                 responder = await self.media_storage.fetch_media(file_info)
                 if responder:
-                    await respond_with_responder(
-                        request, responder, info.type, info.length
-                    )
-                    return
+                    if for_federation:
+                        await respond_with_multipart_responder(
+                            self.hs.get_clock(),
+                            request,
+                            responder,
+                            info.type,
+                            info.length,
+                            None,
+                        )
+                        return
+                    else:
+                        await respond_with_responder(
+                            request, responder, info.type, info.length
+                        )
+                        return
 
         logger.debug("We don't have a thumbnail of that size. Generating")
 
         # Okay, so we generate one.
-        file_path = await self.media_repo.generate_local_exact_thumbnail(
+        thumbnail_result = await self.media_repo.generate_local_exact_thumbnail(
             media_id,
             desired_width,
             desired_height,
@@ -343,8 +374,21 @@ class ThumbnailProvider:
             url_cache=bool(media_info.url_cache),
         )
 
-        if file_path:
-            await respond_with_file(request, desired_type, file_path)
+        if thumbnail_result:
+            file_path, file_info = thumbnail_result
+            assert file_info.thumbnail is not None
+
+            if for_federation:
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(),
+                    request,
+                    FileResponder(self.hs, open(file_path, "rb")),
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                    None,
+                )
+            else:
+                await respond_with_file(self.hs, request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             raise SynapseError(400, "Failed to generate thumbnail.")
@@ -360,14 +404,28 @@ class ThumbnailProvider:
         desired_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name,
+            media_id,
+            max_timeout_ms,
+            ip_address,
+            use_federation,
+            allow_authenticated,
         )
         if not media_info:
             respond_404(request)
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                respond_404(request)
+                return
+
         thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )
@@ -408,7 +466,7 @@ class ThumbnailProvider:
         )
 
         if file_path:
-            await respond_with_file(request, desired_type, file_path)
+            await respond_with_file(self.hs, request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             raise SynapseError(400, "Failed to generate thumbnail.")
@@ -424,16 +482,29 @@ class ThumbnailProvider:
         m_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name,
+            media_id,
+            max_timeout_ms,
+            ip_address,
+            use_federation,
+            allow_authenticated,
         )
         if not media_info:
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )
@@ -448,6 +519,7 @@ class ThumbnailProvider:
             media_info.filesystem_id,
             url_cache=False,
             server_name=server_name,
+            for_federation=False,
         )
 
     async def _select_and_respond_with_thumbnail(
@@ -461,6 +533,8 @@ class ThumbnailProvider:
         media_id: str,
         file_id: str,
         url_cache: bool,
+        for_federation: bool,
+        media_info: Optional[LocalMedia] = None,
         server_name: Optional[str] = None,
     ) -> None:
         """
@@ -476,6 +550,8 @@ class ThumbnailProvider:
             file_id: The ID of the media that a thumbnail is being requested for.
             url_cache: True if this is from a URL cache.
             server_name: The server name, if this is a remote thumbnail.
+            for_federation: whether the request originated from the federation `/thumbnail` endpoint
+            media_info: metadata about the media being requested.
         """
         logger.debug(
             "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
@@ -511,13 +587,25 @@ class ThumbnailProvider:
 
             responder = await self.media_storage.fetch_media(file_info)
             if responder:
-                await respond_with_responder(
-                    request,
-                    responder,
-                    file_info.thumbnail.type,
-                    file_info.thumbnail.length,
-                )
-                return
+                if for_federation:
+                    assert media_info is not None
+                    await respond_with_multipart_responder(
+                        self.hs.get_clock(),
+                        request,
+                        responder,
+                        file_info.thumbnail.type,
+                        file_info.thumbnail.length,
+                        None,
+                    )
+                    return
+                else:
+                    await respond_with_responder(
+                        request,
+                        responder,
+                        file_info.thumbnail.type,
+                        file_info.thumbnail.length,
+                    )
+                    return
 
             # If we can't find the thumbnail we regenerate it. This can happen
             # if e.g. we've deleted the thumbnails but still have the original
@@ -558,12 +646,23 @@ class ThumbnailProvider:
                 )
 
             responder = await self.media_storage.fetch_media(file_info)
-            await respond_with_responder(
-                request,
-                responder,
-                file_info.thumbnail.type,
-                file_info.thumbnail.length,
-            )
+            if for_federation:
+                assert media_info is not None
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(),
+                    request,
+                    responder,
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                    None,
+                )
+            else:
+                await respond_with_responder(
+                    request,
+                    responder,
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                )
         else:
             # This might be because:
             # 1. We can't create thumbnails for the given media (corrupted or
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 19c92b02a0..49d0ff9fc1 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -293,7 +293,7 @@ def wrap_as_background_process(
     """
 
     def wrap_as_background_process_inner(
-        func: Callable[P, Awaitable[Optional[R]]]
+        func: Callable[P, Awaitable[Optional[R]]],
     ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
         @wraps(func)
         def wrap_as_background_process_inner_2(
diff --git a/synapse/notifier.py b/synapse/notifier.py
index c3ecf86ec4..88f531182a 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -41,6 +41,7 @@ import attr
 from prometheus_client import Counter
 
 from twisted.internet import defer
+from twisted.internet.defer import Deferred
 
 from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, Membership
 from synapse.api.errors import AuthError
@@ -52,6 +53,7 @@ from synapse.logging.opentracing import log_kv, start_active_span
 from synapse.metrics import LaterGauge
 from synapse.streams.config import PaginationConfig
 from synapse.types import (
+    ISynapseReactor,
     JsonDict,
     MultiWriterStreamToken,
     PersistedEventPosition,
@@ -61,8 +63,11 @@ from synapse.types import (
     StreamToken,
     UserID,
 )
-from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
+from synapse.util.async_helpers import (
+    timeout_deferred,
+)
 from synapse.util.metrics import Measure
+from synapse.util.stringutils import shortstr
 from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
@@ -89,18 +94,6 @@ def count(func: Callable[[T], bool], it: Iterable[T]) -> int:
     return n
 
 
-class _NotificationListener:
-    """This represents a single client connection to the events stream.
-    The events stream handler will have yielded to the deferred, so to
-    notify the handler it is sufficient to resolve the deferred.
-    """
-
-    __slots__ = ["deferred"]
-
-    def __init__(self, deferred: "defer.Deferred"):
-        self.deferred = deferred
-
-
 class _NotifierUserStream:
     """This represents a user connected to the event stream.
     It tracks the most recent stream token for that user.
@@ -113,59 +106,49 @@ class _NotifierUserStream:
 
     def __init__(
         self,
+        reactor: ISynapseReactor,
         user_id: str,
         rooms: StrCollection,
         current_token: StreamToken,
         time_now_ms: int,
     ):
+        self.reactor = reactor
         self.user_id = user_id
         self.rooms = set(rooms)
-        self.current_token = current_token
 
         # The last token for which we should wake up any streams that have a
         # token that comes before it. This gets updated every time we get poked.
         # We start it at the current token since if we get any streams
         # that have a token from before we have no idea whether they should be
         # woken up or not, so lets just wake them up.
-        self.last_notified_token = current_token
+        self.current_token = current_token
         self.last_notified_ms = time_now_ms
 
-        self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred(
-            defer.Deferred()
-        )
+        # Set of listeners that we need to wake up when there has been a change.
+        self.listeners: Set[Deferred[StreamToken]] = set()
 
-    def notify(
+    def update_and_fetch_deferreds(
         self,
-        stream_key: StreamKeyType,
-        stream_id: Union[int, RoomStreamToken, MultiWriterStreamToken],
+        current_token: StreamToken,
         time_now_ms: int,
-    ) -> None:
-        """Notify any listeners for this user of a new event from an
-        event source.
+    ) -> Collection["Deferred[StreamToken]"]:
+        """Update the stream for this user because of a new event from an
+        event source, and return the set of deferreds to wake up.
+
         Args:
-            stream_key: The stream the event came from.
-            stream_id: The new id for the stream the event came from.
+            current_token: The new current token.
             time_now_ms: The current time in milliseconds.
+
+        Returns:
+            The set of deferreds that need to be called.
         """
-        self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)
-        self.last_notified_token = self.current_token
+        self.current_token = current_token
         self.last_notified_ms = time_now_ms
-        notify_deferred = self.notify_deferred
 
-        log_kv(
-            {
-                "notify": self.user_id,
-                "stream": stream_key,
-                "stream_id": stream_id,
-                "listeners": self.count_listeners(),
-            }
-        )
+        listeners = self.listeners
+        self.listeners = set()
 
-        users_woken_by_stream_counter.labels(stream_key).inc()
-
-        with PreserveLoggingContext():
-            self.notify_deferred = ObservableDeferred(defer.Deferred())
-            notify_deferred.callback(self.current_token)
+        return listeners
 
     def remove(self, notifier: "Notifier") -> None:
         """Remove this listener from all the indexes in the Notifier
@@ -179,9 +162,9 @@ class _NotifierUserStream:
         notifier.user_to_user_stream.pop(self.user_id)
 
     def count_listeners(self) -> int:
-        return len(self.notify_deferred.observers())
+        return len(self.listeners)
 
-    def new_listener(self, token: StreamToken) -> _NotificationListener:
+    def new_listener(self, token: StreamToken) -> "Deferred[StreamToken]":
         """Returns a deferred that is resolved when there is a new token
         greater than the given token.
 
@@ -191,10 +174,17 @@ class _NotifierUserStream:
         """
         # Immediately wake up the stream if something has already happened
         # since their last token.
-        if self.last_notified_token != token:
-            return _NotificationListener(defer.succeed(self.current_token))
-        else:
-            return _NotificationListener(self.notify_deferred.observe())
+        if token != self.current_token:
+            return defer.succeed(self.current_token)
+
+        # Create a new deferred and add it to the set of listeners. We add a
+        # cancel handler to remove it from the set again, to handle timeouts.
+        deferred: "Deferred[StreamToken]" = Deferred(
+            canceller=lambda d: self.listeners.discard(d)
+        )
+        self.listeners.add(deferred)
+
+        return deferred
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -247,6 +237,7 @@ class Notifier:
         # List of callbacks to be notified when a lock is released
         self._lock_released_callback: List[Callable[[str, str, str], None]] = []
 
+        self.reactor = hs.get_reactor()
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
         self._pusher_pool = hs.get_pusherpool()
@@ -342,14 +333,25 @@ class Notifier:
         # Wake up all related user stream notifiers
         user_streams = self.room_to_user_streams.get(room_id, set())
         time_now_ms = self.clock.time_msec()
+        current_token = self.event_sources.get_current_token()
+
+        listeners: List["Deferred[StreamToken]"] = []
         for user_stream in user_streams:
             try:
-                user_stream.notify(
-                    StreamKeyType.UN_PARTIAL_STATED_ROOMS, new_token, time_now_ms
+                listeners.extend(
+                    user_stream.update_and_fetch_deferreds(current_token, time_now_ms)
                 )
             except Exception:
                 logger.exception("Failed to notify listener")
 
+        with PreserveLoggingContext():
+            for listener in listeners:
+                listener.callback(current_token)
+
+        users_woken_by_stream_counter.labels(StreamKeyType.UN_PARTIAL_STATED_ROOMS).inc(
+            len(user_streams)
+        )
+
         # Poke the replication so that other workers also see the write to
         # the un-partial-stated rooms stream.
         self.notify_replication()
@@ -519,12 +521,16 @@ class Notifier:
         rooms = rooms or []
 
         with Measure(self.clock, "on_new_event"):
-            user_streams = set()
+            user_streams: Set[_NotifierUserStream] = set()
 
             log_kv(
                 {
                     "waking_up_explicit_users": len(users),
                     "waking_up_explicit_rooms": len(rooms),
+                    "users": shortstr(users),
+                    "rooms": shortstr(rooms),
+                    "stream": stream_key,
+                    "stream_id": new_token,
                 }
             )
 
@@ -544,12 +550,27 @@ class Notifier:
                 )
 
             time_now_ms = self.clock.time_msec()
+            current_token = self.event_sources.get_current_token()
+            listeners: List["Deferred[StreamToken]"] = []
             for user_stream in user_streams:
                 try:
-                    user_stream.notify(stream_key, new_token, time_now_ms)
+                    listeners.extend(
+                        user_stream.update_and_fetch_deferreds(
+                            current_token, time_now_ms
+                        )
+                    )
                 except Exception:
                     logger.exception("Failed to notify listener")
 
+            # We resolve all these deferreds in one go so that we only need to
+            # call `PreserveLoggingContext` once, as it has a bunch of overhead
+            # (to calculate performance stats)
+            with PreserveLoggingContext():
+                for listener in listeners:
+                    listener.callback(current_token)
+
+            users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
+
             self.notify_replication()
 
             # Notify appservices.
@@ -586,6 +607,7 @@ class Notifier:
             if room_ids is None:
                 room_ids = await self.store.get_rooms_for_user(user_id)
             user_stream = _NotifierUserStream(
+                reactor=self.reactor,
                 user_id=user_id,
                 rooms=room_ids,
                 current_token=current_token,
@@ -608,8 +630,8 @@ class Notifier:
                         # Now we wait for the _NotifierUserStream to be told there
                         # is a new token.
                         listener = user_stream.new_listener(prev_token)
-                        listener.deferred = timeout_deferred(
-                            listener.deferred,
+                        listener = timeout_deferred(
+                            listener,
                             (end_time - now) / 1000.0,
                             self.hs.get_reactor(),
                         )
@@ -622,7 +644,7 @@ class Notifier:
                         )
 
                         with PreserveLoggingContext():
-                            await listener.deferred
+                            await listener
 
                         log_kv(
                             {
@@ -773,6 +795,7 @@ class Notifier:
         stream_token = await self.event_sources.bound_future_token(stream_token)
 
         start = self.clock.time_msec()
+        logged = False
         while True:
             current_token = self.event_sources.get_current_token()
             if stream_token.is_before_or_eq(current_token):
@@ -783,11 +806,13 @@ class Notifier:
             if now - start > 10_000:
                 return False
 
-            logger.info(
-                "Waiting for current token to reach %s; currently at %s",
-                stream_token,
-                current_token,
-            )
+            if not logged:
+                logger.info(
+                    "Waiting for current token to reach %s; currently at %s",
+                    stream_token,
+                    current_token,
+                )
+                logged = True
 
             # TODO: be better
             await self.clock.sleep(0.5)
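For readers skimming the notifier hunk above: the refactor replaces the old `ObservableDeferred` plumbing with a plain set of listener `Deferred`s per user stream, which `update_and_fetch_deferreds` drains and the `Notifier` fires in one `PreserveLoggingContext` block. The snippet below is a minimal, self-contained sketch of that pattern using plain Twisted deferreds; the class name and the integer token are illustrative and are not Synapse code.

```python
# Minimal sketch of the listener pattern: waiters get a Deferred from
# new_listener(), and the notifier drains and fires the whole set when the
# stream advances. Names and the int token are illustrative only.
from typing import Set

from twisted.internet import defer
from twisted.internet.defer import Deferred


class ListenerStreamSketch:
    def __init__(self, current_token: int) -> None:
        self.current_token = current_token
        self.listeners: Set["Deferred[int]"] = set()

    def new_listener(self, token: int) -> "Deferred[int]":
        # If the stream has already advanced past the caller's token, wake them now.
        if token != self.current_token:
            return defer.succeed(self.current_token)
        # Otherwise register a listener; the canceller keeps the set tidy if the
        # caller times out and cancels the Deferred.
        d: "Deferred[int]" = Deferred(canceller=lambda d: self.listeners.discard(d))
        self.listeners.add(d)
        return d

    def update_and_fetch_deferreds(self, new_token: int) -> Set["Deferred[int]"]:
        # Swap the set out before firing so listeners registered by callbacks
        # are not woken as part of this batch.
        self.current_token = new_token
        listeners, self.listeners = self.listeners, set()
        return listeners


stream = ListenerStreamSketch(current_token=1)
waiter = stream.new_listener(token=1)
waiter.addCallback(lambda tok: print("woken at token", tok))
for d in stream.update_and_fetch_deferreds(new_token=2):
    d.callback(2)  # prints "woken at token 2"
```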
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 34ab637c3d..9c0592a902 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -304,9 +304,9 @@ class BulkPushRuleEvaluator:
                     if relation_type == "m.thread" and event.content.get(
                         "m.relates_to", {}
                     ).get("is_falling_back", False):
-                        related_events["m.in_reply_to"][
-                            "im.vector.is_falling_back"
-                        ] = ""
+                        related_events["m.in_reply_to"]["im.vector.is_falling_back"] = (
+                            ""
+                        )
 
         return related_events
 
@@ -372,7 +372,8 @@ class BulkPushRuleEvaluator:
                 gather_results(
                     (
                         run_in_background(  # type: ignore[call-arg]
-                            self.store.get_number_joined_users_in_room, event.room_id  # type: ignore[arg-type]
+                            self.store.get_number_joined_users_in_room,
+                            event.room_id,  # type: ignore[arg-type]
                         ),
                         run_in_background(
                             self._get_power_levels_and_sender_level,
@@ -435,6 +436,7 @@ class BulkPushRuleEvaluator:
             self._related_event_match_enabled,
             event.room_version.msc3931_push_features,
             self.hs.config.experimental.msc1767_enabled,  # MSC3931 flag
+            self.hs.config.experimental.msc4210_enabled,
         )
 
         for uid, rules in rules_by_user.items():
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index c9cf838255..d500051714 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -1,7 +1,7 @@
 #
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -23,6 +23,7 @@ from typing import TYPE_CHECKING
 from synapse.http.server import JsonResource
 from synapse.replication.http import (
     account_data,
+    delayed_events,
     devices,
     federation,
     login,
@@ -64,3 +65,4 @@ class ReplicationRestResource(JsonResource):
             login.register_servlets(hs, self)
             register.register_servlets(hs, self)
             devices.register_servlets(hs, self)
+            delayed_events.register_servlets(hs, self)
diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py
new file mode 100644
index 0000000000..229022070c
--- /dev/null
+++ b/synapse/replication/http/delayed_events.py
@@ -0,0 +1,62 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, Dict, Optional, Tuple
+
+from twisted.web.server import Request
+
+from synapse.http.server import HttpServer
+from synapse.replication.http._base import ReplicationEndpoint
+from synapse.types import JsonDict, JsonMapping
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationAddedDelayedEventRestServlet(ReplicationEndpoint):
+    """Handle a delayed event being added by another worker.
+
+    Request format:
+
+        POST /_synapse/replication/added_delayed_event/
+
+        {"next_send_ts": 123456789}
+    """
+
+    NAME = "added_delayed_event"
+    PATH_ARGS = ()
+    CACHE = False
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__(hs)
+
+        self.handler = hs.get_delayed_events_handler()
+
+    @staticmethod
+    async def _serialize_payload(next_send_ts: int) -> JsonDict:  # type: ignore[override]
+        return {"next_send_ts": next_send_ts}
+
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, content: JsonDict
+    ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]:
+        self.handler.on_added(int(content["next_send_ts"]))
+
+        return 200, {}
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+    ReplicationAddedDelayedEventRestServlet(hs).register(http_server)
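The new servlet follows Synapse's usual `ReplicationEndpoint` pattern: workers handle the incoming request in `_handle_request`, while the sending side obtains a client stub for the endpoint. A sketch of how a caller could use that stub, assuming the standard `ReplicationEndpoint.make_client` helper; the surrounding class and the target instance name are purely illustrative.

```python
# Illustrative only: notifying another worker that a delayed event was added,
# via the added_delayed_event replication endpoint.
from synapse.replication.http.delayed_events import (
    ReplicationAddedDelayedEventRestServlet,
)


class DelayedEventsNotifierSketch:
    def __init__(self, hs) -> None:
        # make_client returns an async callable that performs the replication
        # POST with the keyword arguments accepted by _serialize_payload.
        self._added_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)

    async def notify_added(self, next_send_ts: int) -> None:
        await self._added_client(
            instance_name="master",  # hypothetical target instance
            next_send_ts=next_send_ts,
        )
```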
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 9c537427df..940f418396 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -119,7 +119,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
 
         return payload
 
-    async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, JsonDict]:  # type: ignore[override]
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, content: JsonDict
+    ) -> Tuple[int, JsonDict]:
         with Measure(self.clock, "repl_fed_send_events_parse"):
             room_id = content["room_id"]
             backfilled = content["backfilled"]
diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py
index de07e75b46..48e254cdb1 100644
--- a/synapse/replication/http/push.py
+++ b/synapse/replication/http/push.py
@@ -48,7 +48,7 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
 
     """
 
-    NAME = "add_user_account_data"
+    NAME = "remove_pusher"
     PATH_ARGS = ("user_id",)
     CACHE = False
 
@@ -98,7 +98,9 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint):
         self._store = hs.get_datastores().main
 
     @staticmethod
-    async def _serialize_payload(user_id: str, old_room_id: str, new_room_id: str) -> JsonDict:  # type: ignore[override]
+    async def _serialize_payload(  # type: ignore[override]
+        user_id: str, old_room_id: str, new_room_id: str
+    ) -> JsonDict:
         return {}
 
     async def _handle_request(  # type: ignore[override]
@@ -109,7 +111,6 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint):
         old_room_id: str,
         new_room_id: str,
     ) -> Tuple[int, JsonDict]:
-
         await self._store.copy_push_rules_from_room_to_room_for_user(
             old_room_id, new_room_id, user_id
         )
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 3dddbb70b4..0bd5478cd3 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -18,8 +18,8 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-"""A replication client for use by synapse workers.
-"""
+"""A replication client for use by synapse workers."""
+
 import logging
 from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple
 
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index b7a7e77597..7d51441e91 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -23,6 +23,7 @@
 The VALID_SERVER_COMMANDS and VALID_CLIENT_COMMANDS define which commands are
 allowed to be sent by which side.
 """
+
 import abc
 import logging
 from typing import List, Optional, Tuple, Type, TypeVar
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 72a42cb6cc..6101226938 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -857,7 +857,7 @@ UpdateRow = TypeVar("UpdateRow")
 
 
 def _batch_updates(
-    updates: Iterable[Tuple[UpdateToken, UpdateRow]]
+    updates: Iterable[Tuple[UpdateToken, UpdateRow]],
 ) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]:
     """Collect stream updates with the same token together
 
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 4471cc8f0c..fb9c539122 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -23,6 +23,7 @@ protocols.
 
 An explanation of this protocol is available in docs/tcp_replication.md
 """
+
 import fcntl
 import logging
 import struct
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index c0329378ac..d647a2b332 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -18,8 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-"""The server side of the replication stream.
-"""
+"""The server side of the replication stream."""
 
 import logging
 import random
@@ -307,7 +306,7 @@ class ReplicationStreamer:
 
 
 def _batch_updates(
-    updates: List[Tuple[Token, StreamRow]]
+    updates: List[Tuple[Token, StreamRow]],
 ) -> List[Tuple[Optional[Token], StreamRow]]:
     """Takes a list of updates of form [(token, row)] and sets the token to
     None for all rows where the next row has the same token. This is used to
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index d021904de7..ebf5964d29 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -247,7 +247,7 @@ class _StreamFromIdGen(Stream):
 
 
 def current_token_without_instance(
-    current_token: Callable[[], int]
+    current_token: Callable[[], int],
 ) -> Callable[[str], int]:
     """Takes a current token callback function for a single writer stream
     that doesn't take an instance name parameter and wraps it in a function that
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index c94d454a28..4e594e6595 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -2,7 +2,7 @@
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -18,7 +18,8 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import TYPE_CHECKING, Callable
+import logging
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
 
 from synapse.http.server import HttpServer, JsonResource
 from synapse.rest import admin
@@ -30,6 +31,7 @@ from synapse.rest.client import (
     auth,
     auth_issuer,
     capabilities,
+    delayed_events,
     devices,
     directory,
     events,
@@ -67,11 +69,65 @@ from synapse.rest.client import (
     voip,
 )
 
+logger = logging.getLogger(__name__)
+
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 RegisterServletsFunc = Callable[["HomeServer", HttpServer], None]
 
+CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = (
+    versions.register_servlets,
+    initial_sync.register_servlets,
+    room.register_deprecated_servlets,
+    events.register_servlets,
+    room.register_servlets,
+    delayed_events.register_servlets,
+    login.register_servlets,
+    profile.register_servlets,
+    presence.register_servlets,
+    directory.register_servlets,
+    voip.register_servlets,
+    pusher.register_servlets,
+    push_rule.register_servlets,
+    logout.register_servlets,
+    sync.register_servlets,
+    filter.register_servlets,
+    account.register_servlets,
+    register.register_servlets,
+    auth.register_servlets,
+    receipts.register_servlets,
+    read_marker.register_servlets,
+    room_keys.register_servlets,
+    keys.register_servlets,
+    tokenrefresh.register_servlets,
+    tags.register_servlets,
+    account_data.register_servlets,
+    reporting.register_servlets,
+    openid.register_servlets,
+    notifications.register_servlets,
+    devices.register_servlets,
+    thirdparty.register_servlets,
+    sendtodevice.register_servlets,
+    user_directory.register_servlets,
+    room_upgrade_rest_servlet.register_servlets,
+    capabilities.register_servlets,
+    account_validity.register_servlets,
+    relations.register_servlets,
+    password_policy.register_servlets,
+    knock.register_servlets,
+    appservice_ping.register_servlets,
+    admin.register_servlets_for_client_rest_resource,
+    mutual_rooms.register_servlets,
+    login_token_request.register_servlets,
+    rendezvous.register_servlets,
+    auth_issuer.register_servlets,
+)
+
+SERVLET_GROUPS: Dict[str, Iterable[RegisterServletsFunc]] = {
+    "client": CLIENT_SERVLET_FUNCTIONS,
+}
+
 
 class ClientRestResource(JsonResource):
     """Matrix Client API REST resource.
@@ -83,80 +139,56 @@ class ClientRestResource(JsonResource):
        * etc
     """
 
-    def __init__(self, hs: "HomeServer"):
+    def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None):
         JsonResource.__init__(self, hs, canonical_json=False)
-        self.register_servlets(self, hs)
+        if hs.config.media.can_load_media_repo:
+            # This import is here to prevent a circular import failure
+            from synapse.rest.client import media
+
+            SERVLET_GROUPS["media"] = (media.register_servlets,)
+        self.register_servlets(self, hs, servlet_groups)
 
     @staticmethod
-    def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None:
+    def register_servlets(
+        client_resource: HttpServer,
+        hs: "HomeServer",
+        servlet_groups: Optional[Iterable[str]] = None,
+    ) -> None:
         # Some servlets are only registered on the main process (and not worker
         # processes).
         is_main_process = hs.config.worker.worker_app is None
 
-        versions.register_servlets(hs, client_resource)
+        if not servlet_groups:
+            servlet_groups = SERVLET_GROUPS.keys()
 
-        # Deprecated in r0
-        initial_sync.register_servlets(hs, client_resource)
-        room.register_deprecated_servlets(hs, client_resource)
+        for servlet_group in servlet_groups:
+            # Fail on unknown servlet groups.
+            if servlet_group not in SERVLET_GROUPS:
+                if servlet_group == "media":
+                    logger.warning(
+                        "media.can_load_media_repo needs to be configured for the media servlet to be available"
+                    )
+                raise RuntimeError(
+                    f"Attempting to register unknown client servlet: '{servlet_group}'"
+                )
 
-        # Partially deprecated in r0
-        events.register_servlets(hs, client_resource)
+            for servletfunc in SERVLET_GROUPS[servlet_group]:
+                if not is_main_process and servletfunc in [
+                    pusher.register_servlets,
+                    logout.register_servlets,
+                    auth.register_servlets,
+                    tokenrefresh.register_servlets,
+                    reporting.register_servlets,
+                    openid.register_servlets,
+                    thirdparty.register_servlets,
+                    room_upgrade_rest_servlet.register_servlets,
+                    account_validity.register_servlets,
+                    admin.register_servlets_for_client_rest_resource,
+                    mutual_rooms.register_servlets,
+                    login_token_request.register_servlets,
+                    rendezvous.register_servlets,
+                    auth_issuer.register_servlets,
+                ]:
+                    continue
 
-        room.register_servlets(hs, client_resource)
-        login.register_servlets(hs, client_resource)
-        profile.register_servlets(hs, client_resource)
-        presence.register_servlets(hs, client_resource)
-        directory.register_servlets(hs, client_resource)
-        voip.register_servlets(hs, client_resource)
-        if is_main_process:
-            pusher.register_servlets(hs, client_resource)
-        push_rule.register_servlets(hs, client_resource)
-        if is_main_process:
-            logout.register_servlets(hs, client_resource)
-        sync.register_servlets(hs, client_resource)
-        filter.register_servlets(hs, client_resource)
-        account.register_servlets(hs, client_resource)
-        register.register_servlets(hs, client_resource)
-        if is_main_process:
-            auth.register_servlets(hs, client_resource)
-        receipts.register_servlets(hs, client_resource)
-        read_marker.register_servlets(hs, client_resource)
-        room_keys.register_servlets(hs, client_resource)
-        keys.register_servlets(hs, client_resource)
-        if is_main_process:
-            tokenrefresh.register_servlets(hs, client_resource)
-        tags.register_servlets(hs, client_resource)
-        account_data.register_servlets(hs, client_resource)
-        if is_main_process:
-            reporting.register_servlets(hs, client_resource)
-            openid.register_servlets(hs, client_resource)
-        notifications.register_servlets(hs, client_resource)
-        devices.register_servlets(hs, client_resource)
-        if is_main_process:
-            thirdparty.register_servlets(hs, client_resource)
-        sendtodevice.register_servlets(hs, client_resource)
-        user_directory.register_servlets(hs, client_resource)
-        if is_main_process:
-            room_upgrade_rest_servlet.register_servlets(hs, client_resource)
-        capabilities.register_servlets(hs, client_resource)
-        if is_main_process:
-            account_validity.register_servlets(hs, client_resource)
-        relations.register_servlets(hs, client_resource)
-        password_policy.register_servlets(hs, client_resource)
-        knock.register_servlets(hs, client_resource)
-        appservice_ping.register_servlets(hs, client_resource)
-        if hs.config.server.enable_media_repo:
-            from synapse.rest.client import media
-
-            media.register_servlets(hs, client_resource)
-
-        # moving to /_synapse/admin
-        if is_main_process:
-            admin.register_servlets_for_client_rest_resource(hs, client_resource)
-
-        # unstable
-        if is_main_process:
-            mutual_rooms.register_servlets(hs, client_resource)
-            login_token_request.register_servlets(hs, client_resource)
-            rendezvous.register_servlets(hs, client_resource)
-            auth_issuer.register_servlets(hs, client_resource)
+                servletfunc(hs, client_resource)
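The refactor above replaces the long run of explicit `register_servlets` calls with a data-driven registry, so a `ClientRestResource` can be restricted to particular servlet groups. A short sketch of how that constructor might be used; the `hs` homeserver object is assumed to come from the usual setup code and is not shown.

```python
# Illustrative only: registering just the "client" group, or the "client" and
# "media" groups together when the media repository is enabled in config.
from synapse.rest import ClientRestResource


def build_client_resource(hs, with_media: bool) -> ClientRestResource:
    groups = ["client", "media"] if with_media else ["client"]
    # Passing None (the default) registers every known group.
    return ClientRestResource(hs, servlet_groups=groups)
```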
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index cdaee17451..4db8975674 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -98,6 +98,8 @@ from synapse.rest.admin.users import (
     DeactivateAccountRestServlet,
     PushersRestServlet,
     RateLimitRestServlet,
+    RedactUser,
+    RedactUserStatus,
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     ShadowBanRestServlet,
@@ -319,6 +321,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server)
     UserByExternalId(hs).register(http_server)
     UserByThreePid(hs).register(http_server)
+    RedactUser(hs).register(http_server)
+    RedactUserStatus(hs).register(http_server)
 
     DeviceRestServlet(hs).register(http_server)
     DevicesRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py
index 0867f7a51c..bec2331590 100644
--- a/synapse/rest/admin/registration_tokens.py
+++ b/synapse/rest/admin/registration_tokens.py
@@ -181,8 +181,7 @@ class NewRegistrationTokenRestServlet(RestServlet):
 
         uses_allowed = body.get("uses_allowed", None)
         if not (
-            uses_allowed is None
-            or (type(uses_allowed) is int and uses_allowed >= 0)  # noqa: E721
+            uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0)  # noqa: E721
         ):
             raise SynapseError(
                 HTTPStatus.BAD_REQUEST,
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index ad515bd5a3..b146c2754d 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -27,7 +27,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
 
 import attr
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
+from synapse._pydantic_compat import StrictBool, StrictInt, StrictStr
 from synapse.api.constants import Direction, UserTypes
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
@@ -50,17 +50,12 @@ from synapse.rest.admin._base import (
 from synapse.rest.client._base import client_patterns
 from synapse.storage.databases.main.registration import ExternalIDReuseException
 from synapse.storage.databases.main.stats import UserSortOrder
-from synapse.types import JsonDict, JsonMapping, UserID
+from synapse.types import JsonDict, JsonMapping, TaskStatus, UserID
 from synapse.types.rest import RequestBodyModel
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import StrictBool
-else:
-    from pydantic import StrictBool
-
 
 logger = logging.getLogger(__name__)
 
@@ -1410,3 +1405,99 @@ class UserByThreePid(RestServlet):
             raise NotFoundError("User not found")
 
         return HTTPStatus.OK, {"user_id": user_id}
+
+
+class RedactUser(RestServlet):
+    """
+    Redact all of a given user's events in the given rooms, or, if an empty list is
+    provided, in all rooms the user is in or was banned from. Kicks off a background
+    process and returns an id that can be used to check on the progress of the
+    redaction.
+    """
+
+    PATTERNS = admin_patterns("/user/(?P<user_id>[^/]*)/redact")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastores().main
+        self.admin_handler = hs.get_admin_handler()
+
+    class PostBody(RequestBodyModel):
+        rooms: List[StrictStr]
+        reason: Optional[StrictStr]
+        limit: Optional[StrictInt]
+
+    async def on_POST(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester)
+
+        # parse provided user id to check that it is valid
+        UserID.from_string(user_id)
+
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)
+
+        limit = body.limit
+        if limit and limit <= 0:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "If limit is provided it must be a non-negative integer greater than 0.",
+            )
+
+        rooms = body.rooms
+        if not rooms:
+            current_rooms = list(await self._store.get_rooms_for_user(user_id))
+            banned_rooms = list(
+                await self._store.get_rooms_user_currently_banned_from(user_id)
+            )
+            rooms = current_rooms + banned_rooms
+
+        redact_id = await self.admin_handler.start_redact_events(
+            user_id, rooms, requester.serialize(), body.reason, limit
+        )
+
+        return HTTPStatus.OK, {"redact_id": redact_id}
+
+
+class RedactUserStatus(RestServlet):
+    """
+    Check on the progress of the redaction request represented by the provided ID, returning
+    the status of the process and a dict of events that could not be redacted, if any.
+    """
+
+    PATTERNS = admin_patterns("/user/redact_status/(?P<redact_id>[^/]*)$")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self.admin_handler = hs.get_admin_handler()
+
+    async def on_GET(
+        self, request: SynapseRequest, redact_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        task = await self.admin_handler.get_redact_task(redact_id)
+
+        if task:
+            if task.status == TaskStatus.ACTIVE:
+                return HTTPStatus.OK, {"status": TaskStatus.ACTIVE}
+            elif task.status == TaskStatus.COMPLETE:
+                assert task.result is not None
+                failed_redactions = task.result.get("failed_redactions")
+                return HTTPStatus.OK, {
+                    "status": TaskStatus.COMPLETE,
+                    "failed_redactions": failed_redactions if failed_redactions else {},
+                }
+            elif task.status == TaskStatus.SCHEDULED:
+                return HTTPStatus.OK, {"status": TaskStatus.SCHEDULED}
+            else:
+                return HTTPStatus.OK, {
+                    "status": TaskStatus.FAILED,
+                    "error": (
+                        task.error
+                        if task.error
+                        else "Unknown error, please check the logs for more information."
+                    ),
+                }
+        else:
+            raise NotFoundError("redact id '%s' not found" % redact_id)
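Taken together, the two servlets give the admin API an asynchronous redact-user flow: the POST kicks off a background task and returns a `redact_id`, and the GET polls its status. A rough sketch of the request shapes, under the usual assumption that `admin_patterns` mounts these under `/_synapse/admin/v1`; the hostname and access token are placeholders.

```python
# Illustrative only: driving the new admin redaction endpoints over HTTP.
import requests

BASE = "https://homeserver.example/_synapse/admin/v1"  # placeholder host
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

# Kick off redaction of a user's events in two rooms. An empty "rooms" list
# would mean "all rooms the user is in or was banned from", per the servlet above.
resp = requests.post(
    f"{BASE}/user/@spammer:example.com/redact",
    headers=HEADERS,
    json={"rooms": ["!room1:example.com", "!room2:example.com"], "reason": "spam"},
)
redact_id = resp.json()["redact_id"]

# Poll the background task until it completes or fails.
status = requests.get(f"{BASE}/user/redact_status/{redact_id}", headers=HEADERS).json()
print(status["status"], status.get("failed_redactions", {}))
```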
diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py
index 93dec6375a..6cf37869d8 100644
--- a/synapse/rest/client/_base.py
+++ b/synapse/rest/client/_base.py
@@ -19,8 +19,8 @@
 #
 #
 
-"""This module contains base REST classes for constructing client v1 servlets.
-"""
+"""This module contains base REST classes for constructing client v1 servlets."""
+
 import logging
 import re
 from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 8daa449f9e..32fa7b4ec4 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -24,18 +24,12 @@ import random
 from typing import TYPE_CHECKING, List, Optional, Tuple
 from urllib.parse import urlparse
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import StrictBool, StrictStr, constr
-else:
-    from pydantic import StrictBool, StrictStr, constr
-
 import attr
 from typing_extensions import Literal
 
 from twisted.web.server import Request
 
+from synapse._pydantic_compat import StrictBool, StrictStr, constr
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
     Codes,
diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py
index 0ee24081fa..734c9e992f 100644
--- a/synapse/rest/client/account_data.py
+++ b/synapse/rest/client/account_data.py
@@ -108,9 +108,9 @@ class AccountDataServlet(RestServlet):
 
         # Push rules are stored in a separate table and must be queried separately.
         if account_data_type == AccountDataTypes.PUSH_RULES:
-            account_data: Optional[JsonMapping] = (
-                await self._push_rules_handler.push_rules_for_user(requester.user)
-            )
+            account_data: Optional[
+                JsonMapping
+            ] = await self._push_rules_handler.push_rules_for_user(requester.user)
         else:
             account_data = await self.store.get_global_account_data_by_type_for_user(
                 user_id, account_data_type
diff --git a/synapse/rest/client/account_validity.py b/synapse/rest/client/account_validity.py
index 6222a5cc37..ec7836b647 100644
--- a/synapse/rest/client/account_validity.py
+++ b/synapse/rest/client/account_validity.py
@@ -48,9 +48,7 @@ class AccountValidityRenewServlet(RestServlet):
         self.account_renewed_template = (
             hs.config.account_validity.account_validity_account_renewed_template
         )
-        self.account_previously_renewed_template = (
-            hs.config.account_validity.account_validity_account_previously_renewed_template
-        )
+        self.account_previously_renewed_template = hs.config.account_validity.account_validity_account_previously_renewed_template
         self.invalid_token_template = (
             hs.config.account_validity.account_validity_invalid_token_template
         )
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index 4221f35937..b8dca7c797 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -20,14 +20,14 @@
 #
 
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast
 
 from twisted.web.server import Request
 
 from synapse.api.constants import LoginType
 from synapse.api.errors import LoginError, SynapseError
 from synapse.api.urls import CLIENT_API_PREFIX
-from synapse.http.server import HttpServer, respond_with_html
+from synapse.http.server import HttpServer, respond_with_html, respond_with_redirect
 from synapse.http.servlet import RestServlet, parse_string
 from synapse.http.site import SynapseRequest
 
@@ -66,6 +66,23 @@ class AuthRestServlet(RestServlet):
         if not session:
             raise SynapseError(400, "No session supplied")
 
+        if (
+            self.hs.config.experimental.msc3861.enabled
+            and stagetype == "org.matrix.cross_signing_reset"
+        ):
+            # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth
+            # We import lazily here because of the authlib requirement
+            from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
+
+            auth = cast(MSC3861DelegatedAuth, self.auth)
+
+            url = await auth.account_management_url()
+            if url is not None:
+                url = f"{url}?action=org.matrix.cross_signing_reset"
+            else:
+                url = await auth.issuer()
+            respond_with_redirect(request, str.encode(url))
+
         if stagetype == LoginType.RECAPTCHA:
             html = self.recaptcha_template.render(
                 session=session,
diff --git a/synapse/rest/client/auth_issuer.py b/synapse/rest/client/auth_issuer.py
index 77b9720956..acd0399d85 100644
--- a/synapse/rest/client/auth_issuer.py
+++ b/synapse/rest/client/auth_issuer.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 import logging
 import typing
-from typing import Tuple
+from typing import Tuple, cast
 
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import HttpServer
@@ -43,10 +43,16 @@ class AuthIssuerServlet(RestServlet):
     def __init__(self, hs: "HomeServer"):
         super().__init__()
         self._config = hs.config
+        self._auth = hs.get_auth()
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         if self._config.experimental.msc3861.enabled:
-            return 200, {"issuer": self._config.experimental.msc3861.issuer}
+            # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth
+            # We import lazily here because of the authlib requirement
+            from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
+
+            auth = cast(MSC3861DelegatedAuth, self._auth)
+            return 200, {"issuer": await auth.issuer()}
         else:
             # Wouldn't expect this to be reached: the servlet shouldn't have been
             # registered. Still, fail gracefully if we are registered for some reason.
diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py
new file mode 100644
index 0000000000..2dd5a60b2b
--- /dev/null
+++ b/synapse/rest/client/delayed_events.py
@@ -0,0 +1,111 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+# This module contains REST servlets to do with delayed events: /delayed_events/<paths>
+
+import logging
+from enum import Enum
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class _UpdateDelayedEventAction(Enum):
+    CANCEL = "cancel"
+    RESTART = "restart"
+    SEND = "send"
+
+
+class UpdateDelayedEventServlet(RestServlet):
+    PATTERNS = client_patterns(
+        r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)$",
+        releases=(),
+    )
+    CATEGORY = "Delayed event management requests"
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.auth = hs.get_auth()
+        self.delayed_events_handler = hs.get_delayed_events_handler()
+
+    async def on_POST(
+        self, request: SynapseRequest, delay_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+
+        body = parse_json_object_from_request(request)
+        try:
+            action = str(body["action"])
+        except KeyError:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "'action' is missing",
+                Codes.MISSING_PARAM,
+            )
+        try:
+            enum_action = _UpdateDelayedEventAction(action)
+        except ValueError:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "'action' is not one of "
+                + ", ".join(f"'{m.value}'" for m in _UpdateDelayedEventAction),
+                Codes.INVALID_PARAM,
+            )
+
+        if enum_action == _UpdateDelayedEventAction.CANCEL:
+            await self.delayed_events_handler.cancel(requester, delay_id)
+        elif enum_action == _UpdateDelayedEventAction.RESTART:
+            await self.delayed_events_handler.restart(requester, delay_id)
+        elif enum_action == _UpdateDelayedEventAction.SEND:
+            await self.delayed_events_handler.send(requester, delay_id)
+        return 200, {}
+
+
+class DelayedEventsServlet(RestServlet):
+    PATTERNS = client_patterns(
+        r"/org\.matrix\.msc4140/delayed_events$",
+        releases=(),
+    )
+    CATEGORY = "Delayed event management requests"
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.auth = hs.get_auth()
+        self.delayed_events_handler = hs.get_delayed_events_handler()
+
+    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        # TODO: Support Pagination stream API ("from" query parameter)
+        delayed_events = await self.delayed_events_handler.get_all_for_user(requester)
+
+        ret = {"delayed_events": delayed_events}
+        return 200, ret
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+    # The following can't currently be instantiated on workers.
+    if hs.config.worker.worker_app is None:
+        UpdateDelayedEventServlet(hs).register(http_server)
+    DelayedEventsServlet(hs).register(http_server)
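Client-side, the two servlets above cover listing a user's pending delayed events and updating a single one with the `cancel`, `restart`, or `send` actions. Since `client_patterns(..., releases=())` registers only the unstable prefix, the paths in this sketch assume the MSC4140 unstable namespace; host, token, and delay id are placeholders.

```python
# Illustrative only: exercising the MSC4140 delayed-event management endpoints.
import requests

BASE = "https://homeserver.example/_matrix/client/unstable/org.matrix.msc4140"
HEADERS = {"Authorization": "Bearer <access_token>"}  # placeholder token

# List this user's pending delayed events.
pending = requests.get(f"{BASE}/delayed_events", headers=HEADERS).json()
print(pending["delayed_events"])

# Cancel a specific delayed event; "restart" and "send" are the other actions.
requests.post(
    f"{BASE}/delayed_events/<delay_id>",  # delay_id as returned when scheduling
    headers=HEADERS,
    json={"action": "cancel"},
)
```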
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index 8313d687b7..6a45a5d130 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -24,13 +24,7 @@ import logging
 from http import HTTPStatus
 from typing import TYPE_CHECKING, List, Optional, Tuple
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import Extra, StrictStr
-else:
-    from pydantic import Extra, StrictStr
-
+from synapse._pydantic_compat import Extra, StrictStr
 from synapse.api import errors
 from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError
 from synapse.handlers.device import DeviceHandler
diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
index 11fdd0f7c6..98ba5c4c2a 100644
--- a/synapse/rest/client/directory.py
+++ b/synapse/rest/client/directory.py
@@ -22,17 +22,11 @@
 import logging
 from typing import TYPE_CHECKING, List, Optional, Tuple
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import StrictStr
-else:
-    from pydantic import StrictStr
-
 from typing_extensions import Literal
 
 from twisted.web.server import Request
 
+from synapse._pydantic_compat import StrictStr
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import (
diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py
index 613890061e..ad23cc76ce 100644
--- a/synapse/rest/client/events.py
+++ b/synapse/rest/client/events.py
@@ -20,6 +20,7 @@
 #
 
 """This module contains REST servlets to do with event streaming, /events."""
+
 import logging
 from typing import TYPE_CHECKING, Dict, List, Tuple, Union
 
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index 67de634eab..7025662fdc 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -23,10 +23,13 @@
 import logging
 import re
 from collections import Counter
-from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast
 
-from synapse.api.errors import Codes, InvalidAPICallError, SynapseError
+from synapse.api.errors import (
+    InteractiveAuthIncompleteError,
+    InvalidAPICallError,
+    SynapseError,
+)
 from synapse.http.server import HttpServer
 from synapse.http.servlet import (
     RestServlet,
@@ -256,9 +259,15 @@ class KeyChangesServlet(RestServlet):
 
         user_id = requester.user.to_string()
 
-        results = await self.device_handler.get_user_ids_changed(user_id, from_token)
+        device_list_updates = await self.device_handler.get_user_ids_changed(
+            user_id, from_token
+        )
 
-        return 200, results
+        response: JsonDict = {}
+        response["changed"] = list(device_list_updates.changed)
+        response["left"] = list(device_list_updates.left)
+
+        return 200, response
 
 
 class OneTimeKeyServlet(RestServlet):
@@ -397,17 +406,36 @@ class SigningKeyUploadServlet(RestServlet):
             # explicitly mark the master key as replaceable.
             if self.hs.config.experimental.msc3861.enabled:
                 if not master_key_updatable_without_uia:
-                    config = self.hs.config.experimental.msc3861
-                    if config.account_management_url is not None:
-                        url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
-                    else:
-                        url = config.issuer
+                    # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth
+                    # We import lazily here because of the authlib requirement
+                    from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
 
-                    raise SynapseError(
-                        HTTPStatus.NOT_IMPLEMENTED,
-                        "To reset your end-to-end encryption cross-signing identity, "
-                        f"you first need to approve it at {url} and then try again.",
-                        Codes.UNRECOGNIZED,
+                    auth = cast(MSC3861DelegatedAuth, self.auth)
+
+                    uri = await auth.account_management_url()
+                    if uri is not None:
+                        url = f"{uri}?action=org.matrix.cross_signing_reset"
+                    else:
+                        url = await auth.issuer()
+
+                    # We use a dummy session ID as this isn't really a UIA flow, but we
+                    # reuse the same API shape for better client compatibility.
+                    raise InteractiveAuthIncompleteError(
+                        "dummy",
+                        {
+                            "session": "dummy",
+                            "flows": [
+                                {"stages": ["org.matrix.cross_signing_reset"]},
+                            ],
+                            "params": {
+                                "org.matrix.cross_signing_reset": {
+                                    "url": url,
+                                },
+                            },
+                            "msg": "To reset your end-to-end encryption cross-signing "
+                            f"identity, you first need to approve it at {url} and "
+                            "then try again.",
+                        },
                     )
             else:
                 # Without MSC3861, we require UIA.
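With MSC3861 enabled, the servlet now reports the cross-signing reset URL through an `InteractiveAuthIncompleteError`, which surfaces to the client as a 401 with a UIA-shaped body rather than the previous 501. Roughly, a client should expect a body shaped like the dict below; the URL value is illustrative and the exact set of top-level fields depends on how the error is serialised.

```python
# Illustrative only: the rough shape of the 401 body a client now receives when
# a cross-signing reset must first be approved at the account management URL.
expected_body = {
    "session": "dummy",
    "flows": [{"stages": ["org.matrix.cross_signing_reset"]}],
    "params": {
        "org.matrix.cross_signing_reset": {
            # Hypothetical account management URL for this example.
            "url": "https://auth.example.com/account?action=org.matrix.cross_signing_reset",
        },
    },
    "msg": (
        "To reset your end-to-end encryption cross-signing identity, you first "
        "need to approve it at <url> and then try again."
    ),
}
```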
diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py
index e31687fc13..d7a17e1b35 100644
--- a/synapse/rest/client/knock.py
+++ b/synapse/rest/client/knock.py
@@ -53,7 +53,6 @@ class KnockRoomAliasServlet(RestServlet):
         super().__init__()
         self.room_member_handler = hs.get_room_member_handler()
         self.auth = hs.get_auth()
-        self._support_via = hs.config.experimental.msc4156_enabled
 
     async def on_POST(
         self,
@@ -72,15 +71,11 @@ class KnockRoomAliasServlet(RestServlet):
 
             # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
             args: Dict[bytes, List[bytes]] = request.args  # type: ignore
-            remote_room_hosts = parse_strings_from_args(
-                args, "server_name", required=False
-            )
-            if self._support_via:
+            # Prefer via over server_name (deprecated with MSC4156)
+            remote_room_hosts = parse_strings_from_args(args, "via", required=False)
+            if remote_room_hosts is None:
                 remote_room_hosts = parse_strings_from_args(
-                    args,
-                    "org.matrix.msc4156.via",
-                    default=remote_room_hosts,
-                    required=False,
+                    args, "server_name", required=False
                 )
         elif RoomAlias.is_valid(room_identifier):
             handler = self.room_member_handler
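The knock servlet now prefers the stable `via` query parameter and only falls back to the deprecated `server_name`, in line with MSC4156. A small sketch of what a knock-by-alias request might look like; the `/v3/knock` path is the spec endpoint rather than something shown in this diff, and the host and token are placeholders.

```python
# Illustrative only: knocking on a room alias, routing via a specific server.
from urllib.parse import quote

import requests

BASE = "https://homeserver.example/_matrix/client/v3"
HEADERS = {"Authorization": "Bearer <access_token>"}  # placeholder token

room = quote("#announcements:example.org", safe="")
requests.post(
    f"{BASE}/knock/{room}",
    params={"via": "example.org"},  # "server_name" still works but is deprecated
    headers=HEADERS,
    json={"reason": "please let me in"},
)
```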
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index ae691bcdba..3271b02d40 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -268,7 +268,7 @@ class LoginRestServlet(RestServlet):
                     approval_notice_medium=ApprovalNoticeMedium.NONE,
                 )
 
-        well_known_data = self._well_known_builder.get_well_known()
+        well_known_data = await self._well_known_builder.get_well_known()
         if well_known_data:
             result["well_known"] = well_known_data
         return 200, result
@@ -363,6 +363,7 @@ class LoginRestServlet(RestServlet):
         login_submission: JsonDict,
         callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None,
         create_non_existent_users: bool = False,
+        default_display_name: Optional[str] = None,
         ratelimit: bool = True,
         auth_provider_id: Optional[str] = None,
         should_issue_refresh_token: bool = False,
@@ -410,7 +411,8 @@ class LoginRestServlet(RestServlet):
             canonical_uid = await self.auth_handler.check_user_exists(user_id)
             if not canonical_uid:
                 canonical_uid = await self.registration_handler.register_user(
-                    localpart=UserID.from_string(user_id).localpart
+                    localpart=UserID.from_string(user_id).localpart,
+                    default_display_name=default_display_name,
                 )
             user_id = canonical_uid
 
@@ -546,11 +548,14 @@ class LoginRestServlet(RestServlet):
         Returns:
             The body of the JSON response.
         """
-        user_id = self.hs.get_jwt_handler().validate_login(login_submission)
+        user_id, default_display_name = self.hs.get_jwt_handler().validate_login(
+            login_submission
+        )
         return await self._complete_login(
             user_id,
             login_submission,
             create_non_existent_users=True,
+            default_display_name=default_display_name,
             should_issue_refresh_token=should_issue_refresh_token,
             request_info=request_info,
         )
diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
index c0ae5dd66f..25b302370f 100644
--- a/synapse/rest/client/media.py
+++ b/synapse/rest/client/media.py
@@ -47,7 +47,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
 logger = logging.getLogger(__name__)
 
 
-class UnstablePreviewURLServlet(RestServlet):
+class PreviewURLServlet(RestServlet):
     """
     Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API
     for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
@@ -65,9 +65,7 @@ class UnstablePreviewURLServlet(RestServlet):
       * Matrix cannot be used to distribute the metadata between homeservers.
     """
 
-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$")
-    ]
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/preview_url$")]
 
     def __init__(
         self,
@@ -95,10 +93,8 @@ class UnstablePreviewURLServlet(RestServlet):
         respond_with_json_bytes(request, 200, og, send_cors=True)
 
 
-class UnstableMediaConfigResource(RestServlet):
-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$")
-    ]
+class MediaConfigResource(RestServlet):
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/config$")]
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
@@ -112,10 +108,10 @@ class UnstableMediaConfigResource(RestServlet):
         respond_with_json(request, 200, self.limits_dict, send_cors=True)
 
 
-class UnstableThumbnailResource(RestServlet):
+class ThumbnailResource(RestServlet):
     PATTERNS = [
         re.compile(
-            "/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
+            "/_matrix/client/v1/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
         )
     ]
 
@@ -142,7 +138,7 @@ class UnstableThumbnailResource(RestServlet):
     ) -> None:
         # Validate the server name, raising if invalid
         parse_and_validate_server_name(server_name)
-        await self.auth.get_user_by_req(request)
+        await self.auth.get_user_by_req(request, allow_guest=True)
 
         set_cors_headers(request)
         set_corp_headers(request)
@@ -159,11 +155,25 @@ class UnstableThumbnailResource(RestServlet):
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self.thumbnailer.select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             else:
                 await self.thumbnailer.respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -191,6 +201,7 @@ class UnstableThumbnailResource(RestServlet):
                 m_type,
                 max_timeout_ms,
                 ip_address,
+                True,
             )
             self.media_repo.mark_recently_accessed(server_name, media_id)
 
@@ -218,7 +229,7 @@ class DownloadResource(RestServlet):
         # Validate the server name, raising if invalid
         parse_and_validate_server_name(server_name)
 
-        await self.auth.get_user_by_req(request)
+        await self.auth.get_user_by_req(request, allow_guest=True)
 
         set_cors_headers(request)
         set_corp_headers(request)
@@ -260,11 +271,9 @@ class DownloadResource(RestServlet):
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     media_repo = hs.get_media_repository()
     if hs.config.media.url_preview_enabled:
-        UnstablePreviewURLServlet(hs, media_repo, media_repo.media_storage).register(
+        PreviewURLServlet(hs, media_repo, media_repo.media_storage).register(
             http_server
         )
-    UnstableMediaConfigResource(hs).register(http_server)
-    UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register(
-        http_server
-    )
+    MediaConfigResource(hs).register(http_server)
+    ThumbnailResource(hs, media_repo, media_repo.media_storage).register(http_server)
     DownloadResource(hs, media_repo).register(http_server)
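This hunk promotes the MSC3916 endpoints from the `unstable/org.matrix.msc3916` prefix to the stable `/_matrix/client/v1/media/*` paths and allows guest access. A quick sketch of what clients can now call; the host, token, and media id are placeholders, and the thumbnail parameters are the standard Matrix ones.

```python
# Illustrative only: the authenticated media endpoints at their stable paths.
import requests

BASE = "https://homeserver.example/_matrix/client/v1/media"
HEADERS = {"Authorization": "Bearer <access_token>"}  # guest access is now allowed too

# Upload limits and other media configuration.
config = requests.get(f"{BASE}/config", headers=HEADERS).json()

# A scaled 96x96 thumbnail of a remote piece of media.
thumb = requests.get(
    f"{BASE}/thumbnail/example.com/<media_id>",
    params={"width": 96, "height": 96, "method": "scale"},
    headers=HEADERS,
)
```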
diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py
index 572e92642c..ecc52956e4 100644
--- a/synapse/rest/client/presence.py
+++ b/synapse/rest/client/presence.py
@@ -19,8 +19,8 @@
 #
 #
 
-""" This module contains REST servlets to do with presence: /presence/<paths>
-"""
+"""This module contains REST servlets to do with presence: /presence/<paths>"""
+
 import logging
 from typing import TYPE_CHECKING, Tuple
 
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index c1a80c5c3d..7a95b9445d 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -19,7 +19,7 @@
 #
 #
 
-""" This module contains REST servlets to do with profile: /profile/<paths> """
+"""This module contains REST servlets to do with profile: /profile/<paths>"""
 
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Tuple
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 5dddbc69be..61e1436841 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -640,12 +640,10 @@ class RegisterRestServlet(RestServlet):
             if not password_hash:
                 raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
 
-            desired_username = (
-                await (
-                    self.password_auth_provider.get_username_for_registration(
-                        auth_result,
-                        params,
-                    )
+            desired_username = await (
+                self.password_auth_provider.get_username_for_registration(
+                    auth_result,
+                    params,
                 )
             )
 
@@ -696,11 +694,9 @@ class RegisterRestServlet(RestServlet):
                 session_id
             )
 
-            display_name = (
-                await (
-                    self.password_auth_provider.get_displayname_for_registration(
-                        auth_result, params
-                    )
+            display_name = await (
+                self.password_auth_provider.get_displayname_for_registration(
+                    auth_result, params
                 )
             )
 
diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py
index 4eee53e5a8..97bd5d8c02 100644
--- a/synapse/rest/client/reporting.py
+++ b/synapse/rest/client/reporting.py
@@ -23,7 +23,7 @@ import logging
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Tuple
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
+from synapse._pydantic_compat import StrictStr
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import (
@@ -40,10 +40,6 @@ from ._base import client_patterns
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import StrictStr
-else:
-    from pydantic import StrictStr
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 903c74f6d8..8883cd6bc0 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -2,7 +2,7 @@
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -19,7 +19,8 @@
 #
 #
 
-""" This module contains REST servlets to do with rooms: /rooms/<paths> """
+"""This module contains REST servlets to do with rooms: /rooms/<paths>"""
+
 import logging
 import re
 from enum import Enum
@@ -67,7 +68,8 @@ from synapse.streams.config import PaginationConfig
 from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID
 from synapse.types.state import StateFilter
 from synapse.util.cancellation import cancellable
-from synapse.util.stringutils import parse_and_validate_server_name, random_string
+from synapse.util.events import generate_fake_event_id
+from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -193,7 +195,9 @@ class RoomStateEventRestServlet(RestServlet):
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
         self.message_handler = hs.get_message_handler()
+        self.delayed_events_handler = hs.get_delayed_events_handler()
         self.auth = hs.get_auth()
+        self._max_event_delay_ms = hs.config.server.max_event_delay_ms
 
     def register(self, http_server: HttpServer) -> None:
         # /rooms/$roomid/state/$eventtype
@@ -289,6 +293,22 @@ class RoomStateEventRestServlet(RestServlet):
         if requester.app_service:
             origin_server_ts = parse_integer(request, "ts")
 
+        delay = _parse_request_delay(request, self._max_event_delay_ms)
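+        # MSC4140: if a delay was requested, schedule the event to be sent
+        # later and return the delay ID instead of an event ID.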
+        if delay is not None:
+            delay_id = await self.delayed_events_handler.add(
+                requester,
+                room_id=room_id,
+                event_type=event_type,
+                state_key=state_key,
+                origin_server_ts=origin_server_ts,
+                content=content,
+                delay=delay,
+            )
+
+            set_tag("delay_id", delay_id)
+            ret = {"delay_id": delay_id}
+            return 200, ret
+
         try:
             if event_type == EventTypes.Member:
                 membership = content.get("membership", None)
@@ -325,7 +345,7 @@ class RoomStateEventRestServlet(RestServlet):
                 )
                 event_id = event.event_id
         except ShadowBanError:
-            event_id = "$" + random_string(43)
+            event_id = generate_fake_event_id()
 
         set_tag("event_id", event_id)
         ret = {"event_id": event_id}
@@ -339,7 +359,9 @@ class RoomSendEventRestServlet(TransactionRestServlet):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.delayed_events_handler = hs.get_delayed_events_handler()
         self.auth = hs.get_auth()
+        self._max_event_delay_ms = hs.config.server.max_event_delay_ms
 
     def register(self, http_server: HttpServer) -> None:
         # /rooms/$roomid/send/$event_type[/$txn_id]
@@ -356,6 +378,26 @@ class RoomSendEventRestServlet(TransactionRestServlet):
     ) -> Tuple[int, JsonDict]:
         content = parse_json_object_from_request(request)
 
+        origin_server_ts = None
+        if requester.app_service:
+            origin_server_ts = parse_integer(request, "ts")
+
+        delay = _parse_request_delay(request, self._max_event_delay_ms)
+        if delay is not None:
+            delay_id = await self.delayed_events_handler.add(
+                requester,
+                room_id=room_id,
+                event_type=event_type,
+                state_key=None,
+                origin_server_ts=origin_server_ts,
+                content=content,
+                delay=delay,
+            )
+
+            set_tag("delay_id", delay_id)
+            ret = {"delay_id": delay_id}
+            return 200, ret
+
         event_dict: JsonDict = {
             "type": event_type,
             "content": content,
@@ -363,10 +405,8 @@ class RoomSendEventRestServlet(TransactionRestServlet):
             "sender": requester.user.to_string(),
         }
 
-        if requester.app_service:
-            origin_server_ts = parse_integer(request, "ts")
-            if origin_server_ts is not None:
-                event_dict["origin_server_ts"] = origin_server_ts
+        if origin_server_ts is not None:
+            event_dict["origin_server_ts"] = origin_server_ts
 
         try:
             (
@@ -377,7 +417,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
             )
             event_id = event.event_id
         except ShadowBanError:
-            event_id = "$" + random_string(43)
+            event_id = generate_fake_event_id()
 
         set_tag("event_id", event_id)
         return 200, {"event_id": event_id}
@@ -409,6 +449,49 @@ class RoomSendEventRestServlet(TransactionRestServlet):
         )
 
 
+def _parse_request_delay(
+    request: SynapseRequest,
+    max_delay: Optional[int],
+) -> Optional[int]:
+    """Parses from the request string the delay parameter for
+        delayed event requests, and checks it for correctness.
+
+    Args:
+        request: the twisted HTTP request.
+        max_delay: the maximum allowed value of the delay parameter,
+            or None if no delay parameter is allowed.
+    Returns:
+        The value of the requested delay, or None if it was absent.
+
+    Raises:
+        SynapseError: if the delay parameter is present and forbidden,
+            or if it exceeds the maximum allowed value.
+    """
+    delay = parse_integer(request, "org.matrix.msc4140.delay")
+    if delay is None:
+        return None
+    if max_delay is None:
+        raise SynapseError(
+            HTTPStatus.BAD_REQUEST,
+            "Delayed events are not supported on this server",
+            Codes.UNKNOWN,
+            {
+                "org.matrix.msc4140.errcode": "M_MAX_DELAY_UNSUPPORTED",
+            },
+        )
+    if delay > max_delay:
+        raise SynapseError(
+            HTTPStatus.BAD_REQUEST,
+            "The requested delay exceeds the allowed maximum.",
+            Codes.UNKNOWN,
+            {
+                "org.matrix.msc4140.errcode": "M_MAX_DELAY_EXCEEDED",
+                "org.matrix.msc4140.max_delay": max_delay,
+            },
+        )
+    return delay
+
+
 # TODO: Needs unit testing for room ID + alias joins
 class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
     CATEGORY = "Event sending requests"
@@ -417,7 +500,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
         super().__init__(hs)
         super(ResolveRoomIdMixin, self).__init__(hs)  # ensure the Mixin is set up
         self.auth = hs.get_auth()
-        self._support_via = hs.config.experimental.msc4156_enabled
 
     def register(self, http_server: HttpServer) -> None:
         # /join/$room_identifier[/$txn_id]
@@ -435,13 +517,11 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
 
         # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
         args: Dict[bytes, List[bytes]] = request.args  # type: ignore
-        remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
-        if self._support_via:
+        # Prefer `via` over `server_name` (the latter is deprecated by MSC4156)
+        remote_room_hosts = parse_strings_from_args(args, "via", required=False)
+        if remote_room_hosts is None:
             remote_room_hosts = parse_strings_from_args(
-                args,
-                "org.matrix.msc4156.via",
-                default=remote_room_hosts,
-                required=False,
+                args, "server_name", required=False
             )
         room_id, remote_room_hosts = await self.resolve_room_id(
             room_identifier,
@@ -1193,7 +1273,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
 
             event_id = event.event_id
         except ShadowBanError:
-            event_id = "$" + random_string(43)
+            event_id = generate_fake_event_id()
 
         set_tag("event_id", event_id)
         return 200, {"event_id": event_id}
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 2a22bc14ec..122708e933 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -21,7 +21,7 @@
 import itertools
 import logging
 from collections import defaultdict
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
 
 from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
 from synapse.api.errors import Codes, StoreError, SynapseError
@@ -52,9 +52,9 @@ from synapse.http.servlet import (
     parse_string,
 )
 from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import trace_with_opname
+from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
 from synapse.rest.admin.experimental_features import ExperimentalFeature
-from synapse.types import JsonDict, Requester, StreamToken
+from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken
 from synapse.types.rest.client import SlidingSyncBody
 from synapse.util import json_decoder
 from synapse.util.caches.lrucache import LruCache
@@ -881,7 +881,6 @@ class SlidingSyncRestServlet(RestServlet):
         )
 
         user = requester.user
-        device_id = requester.device_id
 
         timeout = parse_integer(request, "timeout", default=0)
         # Position in the stream
@@ -889,22 +888,50 @@ class SlidingSyncRestServlet(RestServlet):
 
         from_token = None
         if from_token_string is not None:
-            from_token = await StreamToken.from_string(self.store, from_token_string)
+            from_token = await SlidingSyncStreamToken.from_string(
+                self.store, from_token_string
+            )
 
         # TODO: We currently don't know whether we're going to use sticky params or
         # maybe some filters like sync v2  where they are built up once and referenced
         # by filter ID. For now, we will just prototype with always passing everything
         # in.
         body = parse_and_validate_json_object_from_request(request, SlidingSyncBody)
-        logger.info("Sliding sync request: %r", body)
+
+        # Tag and log useful data to differentiate requests.
+        set_tag(
+            "sliding_sync.sync_type", "initial" if from_token is None else "incremental"
+        )
+        set_tag("sliding_sync.conn_id", body.conn_id or "")
+        log_kv(
+            {
+                "sliding_sync.lists": {
+                    list_name: {
+                        "ranges": list_config.ranges,
+                        "timeline_limit": list_config.timeline_limit,
+                    }
+                    for list_name, list_config in (body.lists or {}).items()
+                },
+                "sliding_sync.room_subscriptions": list(
+                    (body.room_subscriptions or {}).keys()
+                ),
+                # We also include the number of room subscriptions because logs are
+                # limited to 1024 characters and the large room ID list above can be cut
+                # off.
+                "sliding_sync.num_room_subscriptions": len(
+                    (body.room_subscriptions or {}).keys()
+                ),
+            }
+        )
 
         sync_config = SlidingSyncConfig(
             user=user,
-            device_id=device_id,
+            requester=requester,
             # FIXME: Currently, we're just manually copying the fields from the
-            # `SlidingSyncBody` into the config. How can we gurantee into the future
+            # `SlidingSyncBody` into the config. How can we guarantee into the future
             # that we don't forget any? I would like something more structured like
             # `copy_attributes(from=body, to=config)`
+            conn_id=body.conn_id,
             lists=body.lists,
             room_subscriptions=body.room_subscriptions,
             extensions=body.extensions,
@@ -927,7 +954,6 @@ class SlidingSyncRestServlet(RestServlet):
 
         return 200, response_content
 
-    # TODO: Is there a better way to encode things?
     async def encode_response(
         self,
         requester: Requester,
@@ -942,12 +968,14 @@ class SlidingSyncRestServlet(RestServlet):
         response["rooms"] = await self.encode_rooms(
             requester, sliding_sync_result.rooms
         )
-        response["extensions"] = {}  # TODO: sliding_sync_result.extensions
+        response["extensions"] = await self.encode_extensions(
+            requester, sliding_sync_result.extensions
+        )
 
         return response
 
     def encode_lists(
-        self, lists: Dict[str, SlidingSyncResult.SlidingWindowList]
+        self, lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
     ) -> JsonDict:
         def encode_operation(
             operation: SlidingSyncResult.SlidingWindowList.Operation,
@@ -982,28 +1010,56 @@ class SlidingSyncRestServlet(RestServlet):
         serialized_rooms: Dict[str, JsonDict] = {}
         for room_id, room_result in rooms.items():
             serialized_rooms[room_id] = {
-                "joined_count": room_result.joined_count,
-                "invited_count": room_result.invited_count,
                 "notification_count": room_result.notification_count,
                 "highlight_count": room_result.highlight_count,
             }
 
+            if room_result.bump_stamp is not None:
+                serialized_rooms[room_id]["bump_stamp"] = room_result.bump_stamp
+
+            if room_result.joined_count is not None:
+                serialized_rooms[room_id]["joined_count"] = room_result.joined_count
+
+            if room_result.invited_count is not None:
+                serialized_rooms[room_id]["invited_count"] = room_result.invited_count
+
             if room_result.name:
                 serialized_rooms[room_id]["name"] = room_result.name
 
             if room_result.avatar:
                 serialized_rooms[room_id]["avatar"] = room_result.avatar
 
-            if room_result.heroes:
-                serialized_rooms[room_id]["heroes"] = room_result.heroes
+            if room_result.heroes is not None and len(room_result.heroes) > 0:
+                serialized_heroes = []
+                for hero in room_result.heroes:
+                    serialized_hero = {
+                        "user_id": hero.user_id,
+                    }
+                    if hero.display_name is not None:
+                        # Not a typo, just how "displayname" is spelled in the spec
+                        serialized_hero["displayname"] = hero.display_name
+
+                    if hero.avatar_url is not None:
+                        serialized_hero["avatar_url"] = hero.avatar_url
+
+                    serialized_heroes.append(serialized_hero)
+                serialized_rooms[room_id]["heroes"] = serialized_heroes
 
             # We should only include the `initial` key if it's `True` to save bandwidth.
-            # The absense of this flag means `False`.
+            # The absence of this flag means `False`.
             if room_result.initial:
                 serialized_rooms[room_id]["initial"] = room_result.initial
 
+            if room_result.unstable_expanded_timeline:
+                serialized_rooms[room_id]["unstable_expanded_timeline"] = (
+                    room_result.unstable_expanded_timeline
+                )
+
             # This will be omitted for invite/knock rooms with `stripped_state`
-            if room_result.required_state is not None:
+            if (
+                room_result.required_state is not None
+                and len(room_result.required_state) > 0
+            ):
                 serialized_required_state = (
                     await self.event_serializer.serialize_events(
                         room_result.required_state,
@@ -1014,7 +1070,10 @@ class SlidingSyncRestServlet(RestServlet):
                 serialized_rooms[room_id]["required_state"] = serialized_required_state
 
             # This will be omitted for invite/knock rooms with `stripped_state`
-            if room_result.timeline_events is not None:
+            if (
+                room_result.timeline_events is not None
+                and len(room_result.timeline_events) > 0
+            ):
                 serialized_timeline = await self.event_serializer.serialize_events(
                     room_result.timeline_events,
                     time_now,
@@ -1029,9 +1088,9 @@ class SlidingSyncRestServlet(RestServlet):
 
             # This will be omitted for invite/knock rooms with `stripped_state`
             if room_result.prev_batch is not None:
-                serialized_rooms[room_id]["prev_batch"] = (
-                    await room_result.prev_batch.to_string(self.store)
-                )
+                serialized_rooms[room_id][
+                    "prev_batch"
+                ] = await room_result.prev_batch.to_string(self.store)
 
             # This will be omitted for invite/knock rooms with `stripped_state`
             if room_result.num_live is not None:
@@ -1042,7 +1101,10 @@ class SlidingSyncRestServlet(RestServlet):
                 serialized_rooms[room_id]["is_dm"] = room_result.is_dm
 
             # Stripped state only applies to invite/knock rooms
-            if room_result.stripped_state is not None:
+            if (
+                room_result.stripped_state is not None
+                and len(room_result.stripped_state) > 0
+            ):
                 # TODO: `knocked_state` but that isn't specced yet.
                 #
                 # TODO: Instead of adding `knocked_state`, it would be good to rename
@@ -1053,6 +1115,73 @@ class SlidingSyncRestServlet(RestServlet):
 
         return serialized_rooms
 
+    async def encode_extensions(
+        self, requester: Requester, extensions: SlidingSyncResult.Extensions
+    ) -> JsonDict:
+        serialized_extensions: JsonDict = {}
+
+        if extensions.to_device is not None:
+            serialized_extensions["to_device"] = {
+                "next_batch": extensions.to_device.next_batch,
+                "events": extensions.to_device.events,
+            }
+
+        if extensions.e2ee is not None:
+            serialized_extensions["e2ee"] = {
+                # We always include this because of
+                # https://github.com/vector-im/element-android/issues/3725. The spec
+                # isn't terribly clear on when this can be omitted and how a client
+                # would tell the difference between "no keys present" and "nothing
+                # changed" in terms of whole field absent / individual key type entry
+                # absent. Corresponding synapse issue:
+                # https://github.com/matrix-org/synapse/issues/10456
+                "device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count,
+                # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
+                # states that this field should always be included, as long as the
+                # server supports the feature.
+                "device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types,
+            }
+
+            if extensions.e2ee.device_list_updates is not None:
+                serialized_extensions["e2ee"]["device_lists"] = {}
+
+                serialized_extensions["e2ee"]["device_lists"]["changed"] = list(
+                    extensions.e2ee.device_list_updates.changed
+                )
+                serialized_extensions["e2ee"]["device_lists"]["left"] = list(
+                    extensions.e2ee.device_list_updates.left
+                )
+
+        if extensions.account_data is not None:
+            serialized_extensions["account_data"] = {
+                # Same as the top-level `account_data.events` field in Sync v2.
+                "global": [
+                    {"type": account_data_type, "content": content}
+                    for account_data_type, content in extensions.account_data.global_account_data_map.items()
+                ],
+                # Same as the joined room's account_data field in Sync v2, e.g. the path
+                # `rooms.join["!foo:bar"].account_data.events`.
+                "rooms": {
+                    room_id: [
+                        {"type": account_data_type, "content": content}
+                        for account_data_type, content in event_map.items()
+                    ]
+                    for room_id, event_map in extensions.account_data.account_data_by_room_map.items()
+                },
+            }
+
+        if extensions.receipts is not None:
+            serialized_extensions["receipts"] = {
+                "rooms": extensions.receipts.room_id_to_receipt_map,
+            }
+
+        if extensions.typing is not None:
+            serialized_extensions["typing"] = {
+                "rooms": extensions.typing.room_id_to_typing_map,
+            }
+
+        return serialized_extensions
+
 
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     SyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 30c1f17fc6..f791904168 100644
--- a/synapse/rest/client/transactions.py
+++ b/synapse/rest/client/transactions.py
@@ -21,6 +21,7 @@
 
 """This module contains logic for storing HTTP PUT transactions. This is used
 to ensure idempotency when performing PUTs using the REST API."""
+
 import logging
 from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple
 
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index e01e5f542a..8028cf8ad2 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -4,7 +4,7 @@
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -64,14 +64,23 @@ class VersionsRestServlet(RestServlet):
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         msc3881_enabled = self.config.experimental.msc3881_enabled
+        msc3575_enabled = self.config.experimental.msc3575_enabled
 
         if self.auth.has_access_token(request):
-            requester = await self.auth.get_user_by_req(request)
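+            # Allow guests, and users with locked or expired tokens, when
+            # looking up per-user experimental feature flags.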
+            requester = await self.auth.get_user_by_req(
+                request,
+                allow_guest=True,
+                allow_locked=True,
+                allow_expired=True,
+            )
             user_id = requester.user.to_string()
 
             msc3881_enabled = await self.store.is_feature_enabled(
                 user_id, ExperimentalFeature.MSC3881
             )
+            msc3575_enabled = await self.store.is_feature_enabled(
+                user_id, ExperimentalFeature.MSC3575
+            )
 
         return (
             200,
@@ -102,6 +111,7 @@ class VersionsRestServlet(RestServlet):
                     "v1.8",
                     "v1.9",
                     "v1.10",
+                    "v1.11",
                 ],
                 # as per MSC1497:
                 "unstable_features": {
@@ -161,8 +171,12 @@ class VersionsRestServlet(RestServlet):
                             is not None
                         )
                     ),
+                    # MSC4140: Delayed events
+                    "org.matrix.msc4140": bool(self.config.server.max_event_delay_ms),
                     # MSC4151: Report room API (Client-Server API)
                     "org.matrix.msc4151": self.config.experimental.msc4151_enabled,
+                    # Simplified sliding sync
+                    "org.matrix.simplified_msc3575": msc3575_enabled,
                 },
             },
         )
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index a411ed614e..fea0b9706d 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -23,17 +23,11 @@ import logging
 import re
 from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import Extra, StrictInt, StrictStr
-else:
-    from pydantic import StrictInt, StrictStr, Extra
-
 from signedjson.sign import sign_json
 
 from twisted.web.server import Request
 
+from synapse._pydantic_compat import Extra, StrictInt, StrictStr
 from synapse.crypto.keyring import ServerKeyFetcher
 from synapse.http.server import HttpServer
 from synapse.http.servlet import (
@@ -191,10 +185,10 @@ class RemoteKey(RestServlet):
         server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {}
         for server_name, key_ids in query.items():
             if key_ids:
-                results: Mapping[str, Optional[FetchKeyResultForRemote]] = (
-                    await self.store.get_server_keys_json_for_remote(
-                        server_name, key_ids
-                    )
+                results: Mapping[
+                    str, Optional[FetchKeyResultForRemote]
+                ] = await self.store.get_server_keys_json_for_remote(
+                    server_name, key_ids
                 )
             else:
                 results = await self.store.get_all_server_keys_json_for_remote(
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index c32c626905..3c3f703667 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -84,7 +84,7 @@ class DownloadResource(RestServlet):
 
         if self._is_mine_server_name(server_name):
             await self.media_repo.get_local_media(
-                request, media_id, file_name, max_timeout_ms
+                request, media_id, file_name, max_timeout_ms, allow_authenticated=False
             )
         else:
             allow_remote = parse_boolean(request, "allow_remote", default=True)
@@ -106,4 +106,5 @@ class DownloadResource(RestServlet):
                 max_timeout_ms,
                 ip_address,
                 False,
+                allow_authenticated=False,
             )
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index ce511c6dce..536fea4c32 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -88,11 +88,27 @@ class ThumbnailResource(RestServlet):
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self.thumbnail_provider.select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
+                    allow_authenticated=False,
                 )
             else:
                 await self.thumbnail_provider.respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
+                    allow_authenticated=False,
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -120,5 +136,7 @@ class ThumbnailResource(RestServlet):
                 m_type,
                 max_timeout_ms,
                 ip_address,
+                use_federation=False,
+                allow_authenticated=False,
             )
             self.media_repo.mark_recently_accessed(server_name, media_id)
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index d0ca8ca46b..d336d60c93 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -18,12 +18,13 @@
 #
 #
 import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Tuple, cast
 
 from twisted.web.resource import Resource
 from twisted.web.server import Request
 
-from synapse.http.server import set_cors_headers
+from synapse.api.errors import NotFoundError
+from synapse.http.server import DirectServeJsonResource
 from synapse.http.site import SynapseRequest
 from synapse.types import JsonDict
 from synapse.util import json_encoder
@@ -38,8 +39,9 @@ logger = logging.getLogger(__name__)
 class WellKnownBuilder:
     def __init__(self, hs: "HomeServer"):
         self._config = hs.config
+        self._auth = hs.get_auth()
 
-    def get_well_known(self) -> Optional[JsonDict]:
+    async def get_well_known(self) -> Optional[JsonDict]:
         if not self._config.server.serve_client_wellknown:
             return None
 
@@ -52,13 +54,20 @@ class WellKnownBuilder:
 
         # We use the MSC3861 values as they are used by multiple MSCs
         if self._config.experimental.msc3861.enabled:
+            # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth
+            # We import lazily here because of the authlib requirement
+            from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
+
+            auth = cast(MSC3861DelegatedAuth, self._auth)
+
             result["org.matrix.msc2965.authentication"] = {
-                "issuer": self._config.experimental.msc3861.issuer
+                "issuer": await auth.issuer(),
             }
-            if self._config.experimental.msc3861.account_management_url is not None:
-                result["org.matrix.msc2965.authentication"][
-                    "account"
-                ] = self._config.experimental.msc3861.account_management_url
+            account_management_url = await auth.account_management_url()
+            if account_management_url is not None:
+                result["org.matrix.msc2965.authentication"]["account"] = (
+                    account_management_url
+                )
 
         if self._config.server.extra_well_known_client_content:
             for (
@@ -71,26 +80,22 @@ class WellKnownBuilder:
         return result
 
 
-class ClientWellKnownResource(Resource):
+class ClientWellKnownResource(DirectServeJsonResource):
     """A Twisted web resource which renders the .well-known/matrix/client file"""
 
     isLeaf = 1
 
     def __init__(self, hs: "HomeServer"):
-        Resource.__init__(self)
+        super().__init__()
         self._well_known_builder = WellKnownBuilder(hs)
 
-    def render_GET(self, request: SynapseRequest) -> bytes:
-        set_cors_headers(request)
-        r = self._well_known_builder.get_well_known()
+    async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+        r = await self._well_known_builder.get_well_known()
         if not r:
-            request.setResponseCode(404)
-            request.setHeader(b"Content-Type", b"text/plain")
-            return b".well-known not available"
+            raise NotFoundError(".well-known not available")
 
         logger.debug("returning: %s", r)
-        request.setHeader(b"Content-Type", b"application/json")
-        return json_encoder.encode(r).encode("utf-8")
+        return 200, r
 
 
 class ServerWellKnownResource(Resource):
diff --git a/synapse/server.py b/synapse/server.py
index 4a3f9ff934..318c6abf3d 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -2,7 +2,7 @@
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
 # Copyright 2021 The Matrix.org Foundation C.I.C.
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -34,6 +34,7 @@ from typing_extensions import TypeAlias
 
 from twisted.internet.interfaces import IOpenSSLContextFactory
 from twisted.internet.tcp import Port
+from twisted.python.threadpool import ThreadPool
 from twisted.web.iweb import IPolicyForHTTPS
 from twisted.web.resource import Resource
 
@@ -67,6 +68,7 @@ from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, PasswordAuthProvider
 from synapse.handlers.cas import CasHandler
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
+from synapse.handlers.delayed_events import DelayedEventsHandler
 from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler
 from synapse.handlers.devicemessage import DeviceMessageHandler
 from synapse.handlers.directory import DirectoryHandler
@@ -123,6 +125,7 @@ from synapse.http.client import (
 )
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.media.media_repository import MediaRepository
+from synapse.metrics import register_threadpool
 from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
 from synapse.module_api import ModuleApi
 from synapse.module_api.callbacks import ModuleApiCallbacks
@@ -249,6 +252,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         "account_validity",
         "auth",
         "deactivate_account",
+        "delayed_events",
         "message",
         "pagination",
         "profile",
@@ -559,6 +563,7 @@ class HomeServer(metaclass=abc.ABCMeta):
     def get_sync_handler(self) -> SyncHandler:
         return SyncHandler(self)
 
+    @cache_in_self
     def get_sliding_sync_handler(self) -> SlidingSyncHandler:
         return SlidingSyncHandler(self)
 
@@ -940,3 +945,28 @@ class HomeServer(metaclass=abc.ABCMeta):
     @cache_in_self
     def get_task_scheduler(self) -> TaskScheduler:
         return TaskScheduler(self)
+
+    @cache_in_self
+    def get_media_sender_thread_pool(self) -> ThreadPool:
+        """Fetch the threadpool used to read files when responding to media
+        download requests."""
+
+        # We can choose a large threadpool size as these threads predominantly
+        # do IO rather than CPU work.
+        media_threadpool = ThreadPool(
+            name="media_threadpool", minthreads=1, maxthreads=50
+        )
+
+        media_threadpool.start()
+        self.get_reactor().addSystemEventTrigger(
+            "during", "shutdown", media_threadpool.stop
+        )
+
+        # Register the threadpool with our metrics.
+        register_threadpool("media", media_threadpool)
+
+        return media_threadpool
+
+    @cache_in_self
+    def get_delayed_events_handler(self) -> DelayedEventsHandler:
+        return DelayedEventsHandler(self)
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index f6ea90bd4f..e88e8c9b45 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -119,7 +119,9 @@ class ResourceLimitsServerNotices:
             elif not currently_blocked and limit_msg:
                 # Room is not notifying of a block, when it ought to be.
                 await self._apply_limit_block_notification(
-                    user_id, limit_msg, limit_type  # type: ignore
+                    user_id,
+                    limit_msg,
+                    limit_type,  # type: ignore
                 )
         except SynapseError as e:
             logger.error("Error sending resource limits server notice: %s", e)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index b127289d8d..e14d711c76 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -23,8 +23,11 @@ import logging
 from abc import ABCMeta
 from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union
 
-from synapse.storage.database import make_in_list_sql_clause  # noqa: F401; noqa: F401
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    make_in_list_sql_clause,  # noqa: F401
+)
 from synapse.types import get_domain_from_id
 from synapse.util import json_decoder
 from synapse.util.caches.descriptors import CachedFunction
@@ -109,6 +112,7 @@ class SQLBaseStore(metaclass=ABCMeta):
             self._attempt_to_invalidate_cache(
                 "get_number_joined_users_in_room", (room_id,)
             )
+            self._attempt_to_invalidate_cache("get_member_counts", (room_id,))
             self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
 
             # There's no easy way of invalidating this cache for just the users
@@ -119,14 +123,20 @@ class SQLBaseStore(metaclass=ABCMeta):
             self._attempt_to_invalidate_cache(
                 "get_user_in_room_with_profile", (room_id, user_id)
             )
-            self._attempt_to_invalidate_cache(
-                "get_rooms_for_user_with_stream_ordering", (user_id,)
-            )
             self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,))
+            self._attempt_to_invalidate_cache(
+                "_get_rooms_for_local_user_where_membership_is_inner", (user_id,)
+            )
+            self._attempt_to_invalidate_cache(
+                "get_sliding_sync_rooms_for_user", (user_id,)
+            )
 
         # Purge other caches based on room state.
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+        self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
 
     def _invalidate_state_caches_all(self, room_id: str) -> None:
         """Invalidates caches that are based on the current state, but does
@@ -145,14 +155,18 @@ class SQLBaseStore(metaclass=ABCMeta):
         self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
         self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
         self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("get_member_counts", (room_id,))
         self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
         self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
         self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
-        self._attempt_to_invalidate_cache(
-            "get_rooms_for_user_with_stream_ordering", None
-        )
         self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+        self._attempt_to_invalidate_cache(
+            "_get_rooms_for_local_user_where_membership_is_inner", None
+        )
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+        self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
 
     def _attempt_to_invalidate_cache(
         self, cache_name: str, key: Optional[Collection[Any]]
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index f473294070..34139f580d 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -40,20 +40,15 @@ from typing import (
 
 import attr
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
+from synapse._pydantic_compat import BaseModel
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.types import Connection, Cursor
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrCollection
 from synapse.util import Clock, json_encoder
 
 from . import engines
 
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel
-else:
-    from pydantic import BaseModel
-
 if TYPE_CHECKING:
     from synapse.server import HomeServer
     from synapse.storage.database import (
@@ -487,6 +482,31 @@ class BackgroundUpdater:
 
         return not update_exists
 
+    async def have_completed_background_updates(
+        self, update_names: StrCollection
+    ) -> bool:
+        """Return the name of background updates that have not yet been
+        completed"""
+        if self._all_done:
+            return True
+
+        # We now check if we have completed all pending background updates. We
+        # do this because, once it returns True, it sets `self._all_done` and we
+        # can skip checking the database in future.
+        if await self.has_completed_background_updates():
+            return True
+
+        rows = await self.db_pool.simple_select_many_batch(
+            table="background_updates",
+            column="update_name",
+            iterable=update_names,
+            retcols=("update_name",),
+            desc="get_uncompleted_background_updates",
+        )
+
+        # If we find any rows then some of the given updates have not yet completed.
+        return not bool(rows)
+
     async def do_next_background_update(self, sleep: bool = True) -> bool:
         """Does some amount of work on the next queued background update
 
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index d0e015bf19..879ee9039e 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -416,7 +416,7 @@ class EventsPersistenceStorageController:
         set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
 
         async def enqueue(
-            item: Tuple[str, List[Tuple[EventBase, EventContext]]]
+            item: Tuple[str, List[Tuple[EventBase, EventContext]]],
         ) -> Dict[str, str]:
             room_id, evs_ctxs = item
             return await self._event_persist_queue.add_to_queue(
@@ -502,8 +502,15 @@ class EventsPersistenceStorageController:
         """
         state = await self._calculate_current_state(room_id)
         delta = await self._calculate_state_delta(room_id, state)
+        sliding_sync_table_changes = (
+            await self.persist_events_store._calculate_sliding_sync_table_changes(
+                room_id, [], delta
+            )
+        )
 
-        await self.persist_events_store.update_current_state(room_id, delta)
+        await self.persist_events_store.update_current_state(
+            room_id, delta, sliding_sync_table_changes
+        )
 
     async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
         """Calculate the current state of a room, based on the forward extremities
@@ -785,9 +792,9 @@ class EventsPersistenceStorageController:
         )
 
         # Remove any events which are prev_events of any existing events.
-        existing_prevs: Collection[str] = (
-            await self.persist_events_store._get_events_which_are_prevs(result)
-        )
+        existing_prevs: Collection[
+            str
+        ] = await self.persist_events_store._get_events_which_are_prevs(result)
         result.difference_update(existing_prevs)
 
         # Finally handle the case where the new events have soft-failed prev
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 569f618193..cb4a5857be 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -35,6 +35,7 @@ from typing import (
     Iterable,
     Iterator,
     List,
+    Mapping,
     Optional,
     Sequence,
     Tuple,
@@ -64,6 +65,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.background_updates import BackgroundUpdater
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.types import Connection, Cursor, SQLQueryParameters
+from synapse.types import StrCollection
 from synapse.util.async_helpers import delay_cancellation
 from synapse.util.iterutils import batch_iter
 
@@ -1095,6 +1097,48 @@ class DatabasePool:
 
         txn.execute(sql, vals)
 
+    @staticmethod
+    def simple_insert_returning_txn(
+        txn: LoggingTransaction,
+        table: str,
+        values: Dict[str, Any],
+        returning: StrCollection,
+    ) -> Tuple[Any, ...]:
+        """Executes a `INSERT INTO... RETURNING...` statement (or equivalent for
+        SQLite versions that don't support it).
+        """
+
+        if txn.database_engine.supports_returning:
+            sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % (
+                table,
+                ", ".join(k for k in values.keys()),
+                ", ".join("?" for _ in values.keys()),
+                ", ".join(k for k in returning),
+            )
+
+            txn.execute(sql, list(values.values()))
+            row = txn.fetchone()
+            assert row is not None
+            return row
+        else:
+            # For old versions of SQLite we do a standard insert and then can
+            # use `last_insert_rowid` to get at the row we just inserted
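+            # (note: this fallback assumes the table has an implicit SQLite
+            # rowid, i.e. it was not created as a WITHOUT ROWID table)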
+            DatabasePool.simple_insert_txn(
+                txn,
+                table=table,
+                values=values,
+            )
+            txn.execute("SELECT last_insert_rowid()")
+            row = txn.fetchone()
+            assert row is not None
+            (rowid,) = row
+
+            row = DatabasePool.simple_select_one_txn(
+                txn, table=table, keyvalues={"rowid": rowid}, retcols=returning
+            )
+            assert row is not None
+            return row
+
     async def simple_insert_many(
         self,
         table: str,
@@ -1254,9 +1298,9 @@ class DatabasePool:
         self,
         txn: LoggingTransaction,
         table: str,
-        keyvalues: Dict[str, Any],
-        values: Dict[str, Any],
-        insertion_values: Optional[Dict[str, Any]] = None,
+        keyvalues: Mapping[str, Any],
+        values: Mapping[str, Any],
+        insertion_values: Optional[Mapping[str, Any]] = None,
         where_clause: Optional[str] = None,
     ) -> bool:
         """
@@ -1299,9 +1343,9 @@ class DatabasePool:
         self,
         txn: LoggingTransaction,
         table: str,
-        keyvalues: Dict[str, Any],
-        values: Dict[str, Any],
-        insertion_values: Optional[Dict[str, Any]] = None,
+        keyvalues: Mapping[str, Any],
+        values: Mapping[str, Any],
+        insertion_values: Optional[Mapping[str, Any]] = None,
         where_clause: Optional[str] = None,
         lock: bool = True,
     ) -> bool:
@@ -1322,7 +1366,7 @@ class DatabasePool:
 
         if lock:
             # We need to lock the table :(
-            self.engine.lock_table(txn, table)
+            txn.database_engine.lock_table(txn, table)
 
         def _getwhere(key: str) -> str:
             # If the value we're passing in is None (aka NULL), we need to use
@@ -1376,13 +1420,13 @@ class DatabasePool:
         # successfully inserted
         return True
 
+    @staticmethod
     def simple_upsert_txn_native_upsert(
-        self,
         txn: LoggingTransaction,
         table: str,
-        keyvalues: Dict[str, Any],
-        values: Dict[str, Any],
-        insertion_values: Optional[Dict[str, Any]] = None,
+        keyvalues: Mapping[str, Any],
+        values: Mapping[str, Any],
+        insertion_values: Optional[Mapping[str, Any]] = None,
         where_clause: Optional[str] = None,
     ) -> bool:
         """
@@ -1535,8 +1579,8 @@ class DatabasePool:
 
             self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False)
 
+    @staticmethod
     def simple_upsert_many_txn_native_upsert(
-        self,
         txn: LoggingTransaction,
         table: str,
         key_names: Collection[str],
@@ -1966,8 +2010,8 @@ class DatabasePool:
     def simple_update_txn(
         txn: LoggingTransaction,
         table: str,
-        keyvalues: Dict[str, Any],
-        updatevalues: Dict[str, Any],
+        keyvalues: Mapping[str, Any],
+        updatevalues: Mapping[str, Any],
     ) -> int:
         """
         Update rows in the given database table.
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 586e84f2a4..86431f6e40 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -3,7 +3,7 @@
 #
 # Copyright 2019-2021 The Matrix.org Foundation C.I.C.
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -33,6 +33,7 @@ from synapse.storage.database import (
     LoggingDatabaseConnection,
     LoggingTransaction,
 )
+from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
 from synapse.storage.databases.main.stats import UserSortOrder
 from synapse.storage.engines import BaseDatabaseEngine
 from synapse.storage.types import Cursor
@@ -43,6 +44,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt
 from .cache import CacheInvalidationWorkerStore
 from .censor_events import CensorEventsStore
 from .client_ips import ClientIpWorkerStore
+from .delayed_events import DelayedEventsStore
 from .deviceinbox import DeviceInboxStore
 from .devices import DeviceStore
 from .directory import DirectoryStore
@@ -156,6 +158,8 @@ class DataStore(
     LockStore,
     SessionStore,
     TaskSchedulerWorkerStore,
+    SlidingSyncStore,
+    DelayedEventsStore,
 ):
     def __init__(
         self,
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 966393869b..e583c182ba 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -177,7 +177,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
         def get_room_account_data_for_user_txn(
             txn: LoggingTransaction,
-        ) -> Dict[str, Dict[str, JsonDict]]:
+        ) -> Dict[str, Dict[str, JsonMapping]]:
             # The 'content != '{}' condition below prevents us from using
             # `simple_select_list_txn` here, as it doesn't support conditions
             # other than 'equals'.
@@ -194,7 +194,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
             txn.execute(sql, (user_id,))
 
-            by_room: Dict[str, Dict[str, JsonDict]] = {}
+            by_room: Dict[str, Dict[str, JsonMapping]] = {}
             for room_id, account_data_type, content in txn:
                 room_data = by_room.setdefault(room_id, {})
 
@@ -394,7 +394,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
     async def get_updated_global_account_data_for_user(
         self, user_id: str, stream_id: int
-    ) -> Mapping[str, JsonMapping]:
+    ) -> Dict[str, JsonMapping]:
         """Get all the global account_data that's changed for a user.
 
         Args:
@@ -407,7 +407,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
         def get_updated_global_account_data_for_user(
             txn: LoggingTransaction,
-        ) -> Dict[str, JsonDict]:
+        ) -> Dict[str, JsonMapping]:
             sql = """
                 SELECT account_data_type, content FROM account_data
                 WHERE user_id = ? AND stream_id > ?
@@ -429,7 +429,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
     async def get_updated_room_account_data_for_user(
         self, user_id: str, stream_id: int
-    ) -> Dict[str, Dict[str, JsonDict]]:
+    ) -> Dict[str, Dict[str, JsonMapping]]:
         """Get all the room account_data that's changed for a user.
 
         Args:
@@ -442,14 +442,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
 
         def get_updated_room_account_data_for_user_txn(
             txn: LoggingTransaction,
-        ) -> Dict[str, Dict[str, JsonDict]]:
+        ) -> Dict[str, Dict[str, JsonMapping]]:
             sql = """
                 SELECT room_id, account_data_type, content FROM room_account_data
                 WHERE user_id = ? AND stream_id > ?
             """
             txn.execute(sql, (user_id, stream_id))
 
-            account_data_by_room: Dict[str, Dict[str, JsonDict]] = {}
+            account_data_by_room: Dict[str, Dict[str, JsonMapping]] = {}
             for row in txn:
                 room_account_data = account_data_by_room.setdefault(row[0], {})
                 room_account_data[row[1]] = db_to_json(row[2])
@@ -467,6 +467,56 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
             get_updated_room_account_data_for_user_txn,
         )
 
+    async def get_updated_room_account_data_for_user_for_room(
+        self,
+        # Since there are multiple arguments with the same type, force keyword arguments
+        # so people don't accidentally swap the order
+        *,
+        user_id: str,
+        room_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> Dict[str, JsonMapping]:
+        """Get the room account_data that's changed for a user in a room.
+
+        (> `from_stream_id` and <= `to_stream_id`)
+
+        Args:
+            user_id: The user to get the account_data for.
+            room_id: The room to check
+            from_stream_id: The point in the stream to fetch from
+            to_stream_id: The point in the stream to fetch to
+
+        Returns:
+            A dict of the room account data.
+        """
+
+        def get_updated_room_account_data_for_user_for_room_txn(
+            txn: LoggingTransaction,
+        ) -> Dict[str, JsonMapping]:
+            sql = """
+                SELECT account_data_type, content FROM room_account_data
+                WHERE user_id = ? AND room_id = ? AND stream_id > ? AND stream_id <= ?
+            """
+            txn.execute(sql, (user_id, room_id, from_stream_id, to_stream_id))
+
+            room_account_data: Dict[str, JsonMapping] = {}
+            for row in txn:
+                room_account_data[row[0]] = db_to_json(row[1])
+
+            return room_account_data
+
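+        # Fast path: the per-user stream change cache tells us whether any account data
+        # could have changed for this user since `from_stream_id`; if not, skip the DB query.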
+        changed = self._account_data_stream_cache.has_entity_changed(
+            user_id, int(from_stream_id)
+        )
+        if not changed:
+            return {}
+
+        return await self.db_pool.runInteraction(
+            "get_updated_room_account_data_for_user_for_room",
+            get_updated_room_account_data_for_user_for_room_txn,
+        )
+
     @cached(max_entries=5000, iterable=True)
     async def ignored_by(self, user_id: str) -> FrozenSet[str]:
         """
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index c6787faea0..32c3472e58 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -41,6 +41,7 @@ from synapse.storage.database import (
     LoggingDatabaseConnection,
     LoggingTransaction,
 )
+from synapse.storage.databases.main.events import SLIDING_SYNC_RELEVANT_STATE_SET
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.util.caches.descriptors import CachedFunction
@@ -268,17 +269,32 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token)  # type: ignore[attr-defined]
 
             if data.type == EventTypes.Member:
-                self.get_rooms_for_user_with_stream_ordering.invalidate(  # type: ignore[attr-defined]
-                    (data.state_key,)
+                self._attempt_to_invalidate_cache(
+                    "get_rooms_for_user", (data.state_key,)
+                )
+                self._attempt_to_invalidate_cache(
+                    "get_sliding_sync_rooms_for_user", None
+                )
+            elif data.type == EventTypes.RoomEncryption:
+                self._attempt_to_invalidate_cache(
+                    "get_room_encryption", (data.room_id,)
+                )
+            elif data.type == EventTypes.Create:
+                self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
+
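+            # Any change to a piece of state that the sliding sync tables track also
+            # invalidates the entire `get_sliding_sync_rooms_for_user` cache.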
+            if (data.type, data.state_key) in SLIDING_SYNC_RELEVANT_STATE_SET:
+                self._attempt_to_invalidate_cache(
+                    "get_sliding_sync_rooms_for_user", None
                 )
-                self.get_rooms_for_user.invalidate((data.state_key,))  # type: ignore[attr-defined]
         elif row.type == EventsStreamAllStateRow.TypeId:
             assert isinstance(data, EventsStreamAllStateRow)
             # Similar to the above, but the entire caches are invalidated. This is
             # unfortunate for the membership caches, but should recover quickly.
             self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token)  # type: ignore[attr-defined]
-            self.get_rooms_for_user_with_stream_ordering.invalidate_all()  # type: ignore[attr-defined]
-            self.get_rooms_for_user.invalidate_all()  # type: ignore[attr-defined]
+            self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+            self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
+            self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,))
+            self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
         else:
             raise Exception("Unknown events stream row type %s" % (row.type,))
 
@@ -307,6 +323,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             "get_unread_event_push_actions_by_room_for_user", (room_id,)
         )
 
+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
+
         # The `_get_membership_from_event_id` is immutable, except for the
         # case where we look up an event *before* persisting it.
         self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))
@@ -334,10 +352,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             self._attempt_to_invalidate_cache(
                 "get_invited_rooms_for_local_user", (state_key,)
             )
-            self._attempt_to_invalidate_cache(
-                "get_rooms_for_user_with_stream_ordering", (state_key,)
-            )
             self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))
+            self._attempt_to_invalidate_cache(
+                "_get_rooms_for_local_user_where_membership_is_inner", (state_key,)
+            )
+            self._attempt_to_invalidate_cache(
+                "get_sliding_sync_rooms_for_user", (state_key,)
+            )
 
             self._attempt_to_invalidate_cache(
                 "did_forget",
@@ -349,6 +370,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             self._attempt_to_invalidate_cache(
                 "get_forgotten_rooms_for_user", (state_key,)
             )
+        elif etype == EventTypes.Create:
+            self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+        elif etype == EventTypes.RoomEncryption:
+            self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+
+        if (etype, state_key) in SLIDING_SYNC_RELEVANT_STATE_SET:
+            self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
 
         if relates_to:
             self._attempt_to_invalidate_cache(
@@ -394,21 +422,26 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         )
         self._attempt_to_invalidate_cache("get_relations_for_event", (room_id,))
 
+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
+
         self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
         self._attempt_to_invalidate_cache("get_applicable_edit", None)
         self._attempt_to_invalidate_cache("get_thread_id", None)
         self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None)
         self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None)
-        self._attempt_to_invalidate_cache(
-            "get_rooms_for_user_with_stream_ordering", None
-        )
         self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+        self._attempt_to_invalidate_cache(
+            "_get_rooms_for_local_user_where_membership_is_inner", None
+        )
+        self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
         self._attempt_to_invalidate_cache("did_forget", None)
         self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
         self._attempt_to_invalidate_cache("get_references_for_event", None)
         self._attempt_to_invalidate_cache("get_thread_summary", None)
         self._attempt_to_invalidate_cache("get_thread_participated", None)
         self._attempt_to_invalidate_cache("get_threads", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
 
         self._attempt_to_invalidate_cache("_get_state_group_for_event", None)
 
@@ -438,6 +471,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         self._attempt_to_invalidate_cache("get_account_data_for_room", None)
         self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None)
+        self._attempt_to_invalidate_cache("get_tags_for_room", None)
         self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,))
         self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
         self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None)
@@ -457,10 +491,15 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         self._attempt_to_invalidate_cache(
             "get_current_hosts_in_room_ordered", (room_id,)
         )
+        self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
         self._attempt_to_invalidate_cache("did_forget", None)
         self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
         self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
         self._attempt_to_invalidate_cache("get_room_version_id", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+        self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+
+        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
 
         # And delete state caches.
 
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 4b66247640..bf6cfcbfd9 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -238,9 +238,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                 INNER JOIN user_ips USING (user_id, access_token, ip)
                 GROUP BY user_id, access_token, ip
                 HAVING count(*) > 1
-                """.format(
-                    clause
-                ),
+                """.format(clause),
                 args,
             )
             res = cast(
@@ -373,9 +371,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                     LIMIT ?
                 ) c
                 INNER JOIN user_ips AS u USING (user_id, device_id, last_seen)
-            """ % {
-                "where_clause": where_clause
-            }
+            """ % {"where_clause": where_clause}
             txn.execute(sql, where_args + [batch_size])
 
             rows = cast(List[Tuple[int, str, str, str, str]], txn.fetchall())
diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py
new file mode 100644
index 0000000000..1616e30e22
--- /dev/null
+++ b/synapse/storage/databases/main/delayed_events.py
@@ -0,0 +1,537 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import List, NewType, Optional, Tuple
+
+import attr
+
+from synapse.api.errors import NotFoundError
+from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage.database import LoggingTransaction, StoreError
+from synapse.storage.engines import PostgresEngine
+from synapse.types import JsonDict, RoomID
+from synapse.util import json_encoder, stringutils
+
+logger = logging.getLogger(__name__)
+
+
+DelayID = NewType("DelayID", str)
+UserLocalpart = NewType("UserLocalpart", str)
+DeviceID = NewType("DeviceID", str)
+EventType = NewType("EventType", str)
+StateKey = NewType("StateKey", str)
+
+Delay = NewType("Delay", int)
+Timestamp = NewType("Timestamp", int)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class EventDetails:
+    room_id: RoomID
+    type: EventType
+    state_key: Optional[StateKey]
+    origin_server_ts: Optional[Timestamp]
+    content: JsonDict
+    device_id: Optional[DeviceID]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class DelayedEventDetails(EventDetails):
+    delay_id: DelayID
+    user_localpart: UserLocalpart
+
+
+class DelayedEventsStore(SQLBaseStore):
+    async def get_delayed_events_stream_pos(self) -> int:
+        """
+        Gets the stream position of the background process that watches for state events
+        targeting the same piece of state as any pending delayed events.
+        """
+        return await self.db_pool.simple_select_one_onecol(
+            table="delayed_events_stream_pos",
+            keyvalues={},
+            retcol="stream_id",
+            desc="get_delayed_events_stream_pos",
+        )
+
+    async def update_delayed_events_stream_pos(self, stream_id: Optional[int]) -> None:
+        """
+        Updates the stream position of the background process that watches for state events
+        targeting the same piece of state as any pending delayed events.
+
+        Must only be used by the worker running the background process.
+        """
+        await self.db_pool.simple_update_one(
+            table="delayed_events_stream_pos",
+            keyvalues={},
+            updatevalues={"stream_id": stream_id},
+            desc="update_delayed_events_stream_pos",
+        )
+
+    async def add_delayed_event(
+        self,
+        *,
+        user_localpart: str,
+        device_id: Optional[str],
+        creation_ts: Timestamp,
+        room_id: str,
+        event_type: str,
+        state_key: Optional[str],
+        origin_server_ts: Optional[int],
+        content: JsonDict,
+        delay: int,
+    ) -> Tuple[DelayID, Timestamp]:
+        """
+        Inserts a new delayed event in the DB.
+
+        Returns: The generated ID assigned to the added delayed event,
+            and the send time of the next delayed event to be sent,
+            which is either the event just added or one added earlier.
+        """
+        delay_id = _generate_delay_id()
+        send_ts = Timestamp(creation_ts + delay)
+
+        def add_delayed_event_txn(txn: LoggingTransaction) -> Timestamp:
+            self.db_pool.simple_insert_txn(
+                txn,
+                table="delayed_events",
+                values={
+                    "delay_id": delay_id,
+                    "user_localpart": user_localpart,
+                    "device_id": device_id,
+                    "delay": delay,
+                    "send_ts": send_ts,
+                    "room_id": room_id,
+                    "event_type": event_type,
+                    "state_key": state_key,
+                    "origin_server_ts": origin_server_ts,
+                    "content": json_encoder.encode(content),
+                },
+            )
+
+            next_send_ts = self._get_next_delayed_event_send_ts_txn(txn)
+            assert next_send_ts is not None
+            return next_send_ts
+
+        next_send_ts = await self.db_pool.runInteraction(
+            "add_delayed_event", add_delayed_event_txn
+        )
+
+        return delay_id, next_send_ts
+
+    async def restart_delayed_event(
+        self,
+        *,
+        delay_id: str,
+        user_localpart: str,
+        current_ts: Timestamp,
+    ) -> Timestamp:
+        """
+        Restarts the send time of the matching delayed event,
+        as long as it hasn't already been marked for processing.
+
+        Args:
+            delay_id: The ID of the delayed event to restart.
+            user_localpart: The localpart of the delayed event's owner.
+            current_ts: The current time, which will be used to calculate the new send time.
+
+        Returns: The send time of the next delayed event to be sent,
+            which is either the event just restarted, or another one
+            with an earlier send time than the restarted one's new send time.
+
+        Raises:
+            NotFoundError: if there is no matching delayed event.
+        """
+
+        def restart_delayed_event_txn(
+            txn: LoggingTransaction,
+        ) -> Timestamp:
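+            # Push the send time forward to `current_ts + delay`; rows already marked
+            # as processed are deliberately left untouched.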
+            txn.execute(
+                """
+                UPDATE delayed_events
+                SET send_ts = ? + delay
+                WHERE delay_id = ? AND user_localpart = ?
+                    AND NOT is_processed
+                """,
+                (
+                    current_ts,
+                    delay_id,
+                    user_localpart,
+                ),
+            )
+            if txn.rowcount == 0:
+                raise NotFoundError("Delayed event not found")
+
+            next_send_ts = self._get_next_delayed_event_send_ts_txn(txn)
+            assert next_send_ts is not None
+            return next_send_ts
+
+        return await self.db_pool.runInteraction(
+            "restart_delayed_event", restart_delayed_event_txn
+        )
+
+    async def get_all_delayed_events_for_user(
+        self,
+        user_localpart: str,
+    ) -> List[JsonDict]:
+        """Returns all pending delayed events owned by the given user."""
+        # TODO: Support Pagination stream API ("next_batch" field)
+        rows = await self.db_pool.execute(
+            "get_all_delayed_events_for_user",
+            """
+            SELECT
+                delay_id,
+                room_id,
+                event_type,
+                state_key,
+                delay,
+                send_ts,
+                content
+            FROM delayed_events
+            WHERE user_localpart = ? AND NOT is_processed
+            ORDER BY send_ts
+            """,
+            user_localpart,
+        )
+        return [
+            {
+                "delay_id": DelayID(row[0]),
+                "room_id": str(RoomID.from_string(row[1])),
+                "type": EventType(row[2]),
+                **({"state_key": StateKey(row[3])} if row[3] is not None else {}),
+                "delay": Delay(row[4]),
+                "running_since": Timestamp(row[5] - row[4]),
+                "content": db_to_json(row[6]),
+            }
+            for row in rows
+        ]
+
+    async def process_timeout_delayed_events(
+        self, current_ts: Timestamp
+    ) -> Tuple[
+        List[DelayedEventDetails],
+        Optional[Timestamp],
+    ]:
+        """
+        Marks for processing all delayed events that should have been sent prior to the
+        provided time and that haven't already been marked as such.
+
+        Returns: The details of all newly-processed delayed events,
+            and the send time of the next delayed event to be sent, if any.
+        """
+
+        def process_timeout_delayed_events_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[
+            List[DelayedEventDetails],
+            Optional[Timestamp],
+        ]:
+            sql_cols = ", ".join(
+                (
+                    "delay_id",
+                    "user_localpart",
+                    "room_id",
+                    "event_type",
+                    "state_key",
+                    "origin_server_ts",
+                    "send_ts",
+                    "content",
+                    "device_id",
+                )
+            )
+            sql_update = "UPDATE delayed_events SET is_processed = TRUE"
+            sql_where = "WHERE send_ts <= ? AND NOT is_processed"
+            sql_args = (current_ts,)
+            sql_order = "ORDER BY send_ts"
+            if isinstance(self.database_engine, PostgresEngine):
+                # Do this only in Postgres because:
+                # - SQLite's RETURNING emits rows in an arbitrary order
+                #   - https://www.sqlite.org/lang_returning.html#limitations_and_caveats
+                # - SQLite does not support data-modifying statements in a WITH clause
+                #   - https://www.sqlite.org/lang_with.html
+                #   - https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-MODIFYING
+                txn.execute(
+                    f"""
+                    WITH events_to_send AS (
+                        {sql_update} {sql_where} RETURNING *
+                    ) SELECT {sql_cols} FROM events_to_send {sql_order}
+                    """,
+                    sql_args,
+                )
+                rows = txn.fetchall()
+            else:
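+                # Without a usable RETURNING, select the due rows first (in send_ts
+                # order), then mark them as processed with a separate UPDATE in the
+                # same transaction.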
+                txn.execute(
+                    f"SELECT {sql_cols} FROM delayed_events {sql_where} {sql_order}",
+                    sql_args,
+                )
+                rows = txn.fetchall()
+                txn.execute(f"{sql_update} {sql_where}", sql_args)
+                assert txn.rowcount == len(rows)
+
+            events = [
+                DelayedEventDetails(
+                    RoomID.from_string(row[2]),
+                    EventType(row[3]),
+                    StateKey(row[4]) if row[4] is not None else None,
+                    # If no custom origin_server_ts was set, use send_ts as the event's timestamp
+                    Timestamp(row[5] if row[5] is not None else row[6]),
+                    db_to_json(row[7]),
+                    DeviceID(row[8]) if row[8] is not None else None,
+                    DelayID(row[0]),
+                    UserLocalpart(row[1]),
+                )
+                for row in rows
+            ]
+            next_send_ts = self._get_next_delayed_event_send_ts_txn(txn)
+            return events, next_send_ts
+
+        return await self.db_pool.runInteraction(
+            "process_timeout_delayed_events", process_timeout_delayed_events_txn
+        )
+
+    async def process_target_delayed_event(
+        self,
+        *,
+        delay_id: str,
+        user_localpart: str,
+    ) -> Tuple[
+        EventDetails,
+        Optional[Timestamp],
+    ]:
+        """
+        Marks for processing the matching delayed event, regardless of its timeout time,
+        as long as it has not already been marked as such.
+
+        Args:
+            delay_id: The ID of the delayed event to process.
+            user_localpart: The localpart of the delayed event's owner.
+
+        Returns: The details of the matching delayed event,
+            and the send time of the next delayed event to be sent, if any.
+
+        Raises:
+            NotFoundError: if there is no matching delayed event.
+        """
+
+        def process_target_delayed_event_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[
+            EventDetails,
+            Optional[Timestamp],
+        ]:
+            sql_cols = ", ".join(
+                (
+                    "room_id",
+                    "event_type",
+                    "state_key",
+                    "origin_server_ts",
+                    "content",
+                    "device_id",
+                )
+            )
+            sql_update = "UPDATE delayed_events SET is_processed = TRUE"
+            sql_where = "WHERE delay_id = ? AND user_localpart = ? AND NOT is_processed"
+            sql_args = (delay_id, user_localpart)
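+            # Use `UPDATE ... RETURNING` when the engine supports it; otherwise SELECT
+            # the row first and apply the UPDATE separately below.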
+            txn.execute(
+                (
+                    f"{sql_update} {sql_where} RETURNING {sql_cols}"
+                    if self.database_engine.supports_returning
+                    else f"SELECT {sql_cols} FROM delayed_events {sql_where}"
+                ),
+                sql_args,
+            )
+            row = txn.fetchone()
+            if row is None:
+                raise NotFoundError("Delayed event not found")
+            elif not self.database_engine.supports_returning:
+                txn.execute(f"{sql_update} {sql_where}", sql_args)
+                assert txn.rowcount == 1
+
+            event = EventDetails(
+                RoomID.from_string(row[0]),
+                EventType(row[1]),
+                StateKey(row[2]) if row[2] is not None else None,
+                Timestamp(row[3]) if row[3] is not None else None,
+                db_to_json(row[4]),
+                DeviceID(row[5]) if row[5] is not None else None,
+            )
+
+            return event, self._get_next_delayed_event_send_ts_txn(txn)
+
+        return await self.db_pool.runInteraction(
+            "process_target_delayed_event", process_target_delayed_event_txn
+        )
+
+    async def cancel_delayed_event(
+        self,
+        *,
+        delay_id: str,
+        user_localpart: str,
+    ) -> Optional[Timestamp]:
+        """
+        Cancels the matching delayed event, i.e. removes it as long as it hasn't been processed.
+
+        Args:
+            delay_id: The ID of the delayed event to cancel.
+            user_localpart: The localpart of the delayed event's owner.
+
+        Returns: The send time of the next delayed event to be sent, if any.
+
+        Raises:
+            NotFoundError: if there is no matching delayed event.
+        """
+
+        def cancel_delayed_event_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Timestamp]:
+            try:
+                self.db_pool.simple_delete_one_txn(
+                    txn,
+                    table="delayed_events",
+                    keyvalues={
+                        "delay_id": delay_id,
+                        "user_localpart": user_localpart,
+                        "is_processed": False,
+                    },
+                )
+            except StoreError:
+                if txn.rowcount == 0:
+                    raise NotFoundError("Delayed event not found")
+                else:
+                    raise
+
+            return self._get_next_delayed_event_send_ts_txn(txn)
+
+        return await self.db_pool.runInteraction(
+            "cancel_delayed_event", cancel_delayed_event_txn
+        )
+
+    async def cancel_delayed_state_events(
+        self,
+        *,
+        room_id: str,
+        event_type: str,
+        state_key: str,
+    ) -> Optional[Timestamp]:
+        """
+        Cancels all matching delayed state events, i.e. removes them as long as they haven't been processed.
+
+        Returns: The send time of the next delayed event to be sent, if any.
+        """
+
+        def cancel_delayed_state_events_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Timestamp]:
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="delayed_events",
+                keyvalues={
+                    "room_id": room_id,
+                    "event_type": event_type,
+                    "state_key": state_key,
+                    "is_processed": False,
+                },
+            )
+            return self._get_next_delayed_event_send_ts_txn(txn)
+
+        return await self.db_pool.runInteraction(
+            "cancel_delayed_state_events", cancel_delayed_state_events_txn
+        )
+
+    async def delete_processed_delayed_event(
+        self,
+        delay_id: DelayID,
+        user_localpart: UserLocalpart,
+    ) -> None:
+        """
+        Delete the matching delayed event, as long as it has been marked as processed.
+
+        Raises:
+            StoreError: if there is no matching delayed event, or if it has not yet been processed.
+        """
+        return await self.db_pool.simple_delete_one(
+            table="delayed_events",
+            keyvalues={
+                "delay_id": delay_id,
+                "user_localpart": user_localpart,
+                "is_processed": True,
+            },
+            desc="delete_processed_delayed_event",
+        )
+
+    async def delete_processed_delayed_state_events(
+        self,
+        *,
+        room_id: str,
+        event_type: str,
+        state_key: str,
+    ) -> None:
+        """
+        Delete the matching delayed state events that have been marked as processed.
+        """
+        await self.db_pool.simple_delete(
+            table="delayed_events",
+            keyvalues={
+                "room_id": room_id,
+                "event_type": event_type,
+                "state_key": state_key,
+                "is_processed": True,
+            },
+            desc="delete_processed_delayed_state_events",
+        )
+
+    async def unprocess_delayed_events(self) -> None:
+        """
+        Unmarks all delayed events as processed, so that they will be processed again.
+        """
+        await self.db_pool.simple_update(
+            table="delayed_events",
+            keyvalues={"is_processed": True},
+            updatevalues={"is_processed": False},
+            desc="unprocess_delayed_events",
+        )
+
+    async def get_next_delayed_event_send_ts(self) -> Optional[Timestamp]:
+        """
+        Returns the send time of the next delayed event to be sent, if any.
+        """
+        return await self.db_pool.runInteraction(
+            "get_next_delayed_event_send_ts",
+            self._get_next_delayed_event_send_ts_txn,
+            db_autocommit=True,
+        )
+
+    def _get_next_delayed_event_send_ts_txn(
+        self, txn: LoggingTransaction
+    ) -> Optional[Timestamp]:
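+        # The earliest `send_ts` among unprocessed rows is when the next delayed event
+        # is due to be sent.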
+        result = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="delayed_events",
+            keyvalues={"is_processed": False},
+            retcol="MIN(send_ts)",
+            allow_none=True,
+        )
+        return Timestamp(result) if result is not None else None
+
+
+def _generate_delay_id() -> DelayID:
+    """Generates an opaque string, for use as a delay ID"""
+
+    # We use the following format for delay IDs:
+    #    syd_<random string>
+    # They are scoped to user localparts, so it is possible for
+    # the same ID to exist for multiple users.
+
+    return DelayID(f"syd_{stringutils.random_string(20)}")
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 042d595ea0..0612b82b9b 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -1116,7 +1116,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
 
             txn.execute(sql, (start, stop))
 
-            destinations = {d for d, in txn}
+            destinations = {d for (d,) in txn}
             to_remove = set()
             for d in destinations:
                 try:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 53024bddc3..a83df4075a 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -670,9 +670,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
                         result["keys"] = keys
 
                     device_display_name = None
-                    if (
-                        self.hs.config.federation.allow_device_name_lookup_over_federation
-                    ):
+                    if self.hs.config.federation.allow_device_name_lookup_over_federation:
                         device_display_name = device.display_name
                     if device_display_name:
                         result["device_display_name"] = device_display_name
@@ -917,7 +915,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             from_key,
             to_key,
         )
-        return {u for u, in rows}
+        return {u for (u,) in rows}
 
     @cancellable
     async def get_users_whose_devices_changed(
@@ -968,7 +966,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
                     txn.database_engine, "user_id", chunk
                 )
                 txn.execute(sql % (clause,), [from_key, to_key] + args)
-                changes.update(user_id for user_id, in txn)
+                changes.update(user_id for (user_id,) in txn)
 
             return changes
 
@@ -1520,7 +1518,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             args: List[Any],
         ) -> Set[str]:
             txn.execute(sql.format(clause=clause), args)
-            return {user_id for user_id, in txn}
+            return {user_id for (user_id,) in txn}
 
         changes = set()
         for chunk in batch_iter(changed_room_ids, 1000):
@@ -1560,7 +1558,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             txn: LoggingTransaction,
         ) -> Set[str]:
             txn.execute(sql, (from_id, to_id))
-            return {room_id for room_id, in txn}
+            return {room_id for (room_id,) in txn}
 
         return await self.db_pool.runInteraction(
             "get_all_device_list_changes",
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 4d6a921ab2..c2c93e12d9 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -387,9 +387,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore):
                is_verified, session_data
         FROM e2e_room_keys
         WHERE user_id = ? AND version = ? AND (%s)
-        """ % (
-            " OR ".join(where_clauses)
-        )
+        """ % (" OR ".join(where_clauses))
 
         txn.execute(sql, params)
 
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 9e6c9561ae..575aaf498b 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -472,9 +472,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
         signature_sql = """
             SELECT user_id, key_id, target_device_id, signature
             FROM e2e_cross_signing_signatures WHERE %s
-            """ % (
-            " OR ".join("(" + q + ")" for q in signature_query_clauses)
-        )
+            """ % (" OR ".join("(" + q + ")" for q in signature_query_clauses))
 
         txn.execute(signature_sql, signature_query_params)
         return cast(
@@ -917,9 +915,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
                         FROM e2e_cross_signing_keys
                         WHERE %(clause)s
                         ORDER BY user_id, keytype, stream_id DESC
-                """ % {
-                    "clause": clause
-                }
+                """ % {"clause": clause}
             else:
                 # SQLite has special handling for bare columns when using
                 # MIN/MAX with a `GROUP BY` clause where it picks the value from
@@ -929,9 +925,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
                         FROM e2e_cross_signing_keys
                         WHERE %(clause)s
                         GROUP BY user_id, keytype
-                """ % {
-                    "clause": clause
-                }
+                """ % {"clause": clause}
 
             txn.execute(sql, params)
 
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 24abab4a23..46aa5902d8 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -326,7 +326,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             """
 
             rows = txn.execute_values(sql, chains.items())
-            results.update(r for r, in rows)
+            results.update(r for (r,) in rows)
         else:
             # For SQLite we just fall back to doing a noddy for loop.
             sql = """
@@ -335,7 +335,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             """
             for chain_id, max_no in chains.items():
                 txn.execute(sql, (chain_id, max_no))
-                results.update(r for r, in txn)
+                results.update(r for (r,) in txn)
 
         return results
 
@@ -645,7 +645,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             ]
 
             rows = txn.execute_values(sql, args)
-            result.update(r for r, in rows)
+            result.update(r for (r,) in rows)
         else:
             # For SQLite we just fall back to doing a noddy for loop.
             sql = """
@@ -654,7 +654,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             """
             for chain_id, (min_no, max_no) in chain_to_gap.items():
                 txn.execute(sql, (chain_id, min_no, max_no))
-                result.update(r for r, in txn)
+                result.update(r for (r,) in txn)
 
         return result
 
@@ -1220,13 +1220,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
                 HAVING count(*) > ?
                 ORDER BY count(*) DESC
                 LIMIT ?
-            """ % (
-                where_clause,
-            )
+            """ % (where_clause,)
 
             query_args = list(itertools.chain(room_id_filter, [min_count, limit]))
             txn.execute(sql, query_args)
-            return [room_id for room_id, in txn]
+            return [room_id for (room_id,) in txn]
 
         return await self.db_pool.runInteraction(
             "get_rooms_with_many_extremities", _get_rooms_with_many_extremities_txn
@@ -1313,6 +1311,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
         # We want to make the cache more effective, so we clamp to the last
         # change before the given ordering.
         last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id)  # type: ignore[attr-defined]
+        if last_change is None:
+            # If the room isn't in the cache we know that the last change was
+            # somewhere before the earliest known position of the cache, so we
+            # can clamp to that.
+            last_change = self._events_stream_cache.get_earliest_known_position()  # type: ignore[attr-defined]
 
         # We don't always have a full stream_to_exterm_id table, e.g. after
         # the upgrade that introduced it, so we make sure we never ask for a
@@ -1353,7 +1356,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
 
         def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]:
             txn.execute(sql, (stream_ordering, room_id))
-            return [event_id for event_id, in txn]
+            return [event_id for (event_id,) in txn]
 
         event_ids = await self.db_pool.runInteraction(
             "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 0ebf5b53d5..f42023418e 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -1860,9 +1860,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                     AND epa.notif = 1
                 ORDER BY epa.stream_ordering DESC
                 LIMIT ?
-            """ % (
-                before_clause,
-            )
+            """ % (before_clause,)
             txn.execute(sql, args)
             return cast(
                 List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall()
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1f7acdb859..c0b7d8107d 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -32,6 +32,7 @@ from typing import (
     Iterable,
     List,
     Optional,
+    Sequence,
     Set,
     Tuple,
     cast,
@@ -39,19 +40,27 @@ from typing import (
 
 import attr
 from prometheus_client import Counter
+from typing_extensions import TypedDict
 
 import synapse.metrics
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import (
+    EventContentFields,
+    EventTypes,
+    Membership,
+    RelationTypes,
+)
 from synapse.api.errors import PartialStateConflictError
 from synapse.api.room_versions import RoomVersions
-from synapse.events import EventBase, relation_from_event
+from synapse.events import EventBase, StrippedStateEvent, relation_from_event
 from synapse.events.snapshot import EventContext
+from synapse.events.utils import parse_stripped_state_event
 from synapse.logging.opentracing import trace
 from synapse.storage._base import db_to_json, make_in_list_sql_clause
 from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_tuple_in_list_sql_clause,
 )
 from synapse.storage.databases.main.event_federation import EventFederationStore
 from synapse.storage.databases.main.events_worker import EventCacheEntry
@@ -59,7 +68,15 @@ from synapse.storage.databases.main.search import SearchEntry
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import AbstractStreamIdGenerator
 from synapse.storage.util.sequence import SequenceGenerator
-from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id
+from synapse.types import (
+    JsonDict,
+    MutableStateMap,
+    StateMap,
+    StrCollection,
+    get_domain_from_id,
+)
+from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
+from synapse.types.state import StateFilter
 from synapse.util import json_encoder
 from synapse.util.iterutils import batch_iter, sorted_topologically
 from synapse.util.stringutils import non_null_str_or_none
@@ -78,6 +95,19 @@ event_counter = Counter(
     ["type", "origin_type", "origin_entity"],
 )
 
+# State event type/key pairs that we need to gather to fill in the
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+SLIDING_SYNC_RELEVANT_STATE_SET = (
+    # So we can fill in the `room_type` column
+    (EventTypes.Create, ""),
+    # So we can fill in the `is_encrypted` column
+    (EventTypes.RoomEncryption, ""),
+    # So we can fill in the `room_name` column
+    (EventTypes.Name, ""),
+    # So we can fill in the `tombstone_successor_room_id` column
+    (EventTypes.Tombstone, ""),
+)
+
 
 @attr.s(slots=True, auto_attribs=True)
 class DeltaState:
@@ -99,6 +129,80 @@ class DeltaState:
         return not self.to_delete and not self.to_insert and not self.no_longer_in_room
 
 
+# We want `total=False` because we want to allow values to be unset.
+class SlidingSyncStateInsertValues(TypedDict, total=False):
+    """
+    Insert values relevant for the `sliding_sync_joined_rooms` and
+    `sliding_sync_membership_snapshots` database tables.
+    """
+
+    room_type: Optional[str]
+    is_encrypted: Optional[bool]
+    room_name: Optional[str]
+    tombstone_successor_room_id: Optional[str]
+
+
+class SlidingSyncMembershipSnapshotSharedInsertValues(
+    SlidingSyncStateInsertValues, total=False
+):
+    """
+    Insert values for `sliding_sync_membership_snapshots` that we can share across
+    multiple memberships
+    """
+
+    has_known_state: Optional[bool]
+
+
+@attr.s(slots=True, auto_attribs=True)
+class SlidingSyncMembershipInfo:
+    """
+    Values unique to each membership
+    """
+
+    user_id: str
+    sender: str
+    membership_event_id: str
+    membership: str
+
+
+@attr.s(slots=True, auto_attribs=True)
+class SlidingSyncMembershipInfoWithEventPos(SlidingSyncMembershipInfo):
+    """
+    SlidingSyncMembershipInfo + `stream_ordering`/`instance_name` of the membership
+    event
+    """
+
+    membership_event_stream_ordering: int
+    membership_event_instance_name: str
+
+
+@attr.s(slots=True, auto_attribs=True)
+class SlidingSyncTableChanges:
+    room_id: str
+    # If the row doesn't exist in the `sliding_sync_joined_rooms` table, we need to
+    # fully-insert it which means we also need to include a `bump_stamp` value to use
+    # for the row. This should only be populated when we're trying to fully-insert a
+    # row.
+    #
+    # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    # foreground update for
+    # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    # https://github.com/element-hq/synapse/issues/17623)
+    joined_room_bump_stamp_to_fully_insert: Optional[int]
+    # Values to upsert into `sliding_sync_joined_rooms`
+    joined_room_updates: SlidingSyncStateInsertValues
+
+    # Shared values to upsert into `sliding_sync_membership_snapshots` for each
+    # `to_insert_membership_snapshots`
+    membership_snapshot_shared_insert_values: (
+        SlidingSyncMembershipSnapshotSharedInsertValues
+    )
+    # List of membership to insert into `sliding_sync_membership_snapshots`
+    to_insert_membership_snapshots: List[SlidingSyncMembershipInfo]
+    # List of user_id to delete from `sliding_sync_membership_snapshots`
+    to_delete_membership_snapshots: List[str]
+
+
 @attr.s(slots=True, auto_attribs=True)
 class NewEventChainLinks:
     """Information about new auth chain links that need to be added to the DB.
@@ -223,9 +327,24 @@ class PersistEventsStore:
 
         async with stream_ordering_manager as stream_orderings:
             for (event, _), stream in zip(events_and_contexts, stream_orderings):
+                # XXX: We can't rely on `stream_ordering`/`instance_name` being correct
+                # at this point. We could be working with events that were previously
+                # persisted as an `outlier` with one `stream_ordering` but are now being
+                # persisted again and de-outliered and are being assigned a different
+                # `stream_ordering` here that won't end up being used.
+                # `_update_outliers_txn()` will fix this discrepancy (always use the
+                # `stream_ordering` from the first time it was persisted).
                 event.internal_metadata.stream_ordering = stream
                 event.internal_metadata.instance_name = self._instance_name
 
+            sliding_sync_table_changes = None
+            if state_delta_for_room is not None:
+                sliding_sync_table_changes = (
+                    await self._calculate_sliding_sync_table_changes(
+                        room_id, events_and_contexts, state_delta_for_room
+                    )
+                )
+
             await self.db_pool.runInteraction(
                 "persist_events",
                 self._persist_events_txn,
@@ -235,6 +354,7 @@ class PersistEventsStore:
                 state_delta_for_room=state_delta_for_room,
                 new_forward_extremities=new_forward_extremities,
                 new_event_links=new_event_links,
+                sliding_sync_table_changes=sliding_sync_table_changes,
             )
             persist_event_counter.inc(len(events_and_contexts))
 
@@ -261,6 +381,301 @@ class PersistEventsStore:
                     (room_id,), frozenset(new_forward_extremities)
                 )
 
+    async def _calculate_sliding_sync_table_changes(
+        self,
+        room_id: str,
+        events_and_contexts: Sequence[Tuple[EventBase, EventContext]],
+        delta_state: DeltaState,
+    ) -> SlidingSyncTableChanges:
+        """
+        Calculate the changes to the `sliding_sync_membership_snapshots` and
+        `sliding_sync_joined_rooms` tables given the deltas that are going to be used to
+        update the `current_state_events` table.
+
+        Just a bunch of pre-processing so we don't need to spend time in the
+        transaction itself gathering all of this info. It's also easier to deal with
+        redactions outside of a transaction.
+
+        Args:
+            room_id: The room ID currently being processed.
+            events_and_contexts: List of tuples of (event, context) being persisted.
+                This is completely optional (you can pass an empty list) and will just
+                save us from fetching the events from the database if we already have
+                them. We assume the list is sorted ascending by `stream_ordering`. We
+                don't care about the sort when the events are backfilled (with negative
+                `stream_ordering`).
+            delta_state: Deltas that are going to be used to update the
+                `current_state_events` table. Changes to the current state of the room.
+
+        Returns:
+            SlidingSyncTableChanges
+        """
+        to_insert = delta_state.to_insert
+        to_delete = delta_state.to_delete
+
+        # If no state is changing, we don't need to do anything. This can happen when a
+        # partial-stated room is re-syncing the current state.
+        if not to_insert and not to_delete:
+            return SlidingSyncTableChanges(
+                room_id=room_id,
+                joined_room_bump_stamp_to_fully_insert=None,
+                joined_room_updates={},
+                membership_snapshot_shared_insert_values={},
+                to_insert_membership_snapshots=[],
+                to_delete_membership_snapshots=[],
+            )
+
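+        # Index the events being persisted by event ID so we can reuse them below
+        # instead of fetching them from the database.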
+        event_map = {event.event_id: event for event, _ in events_and_contexts}
+
+        # Handle gathering info for the `sliding_sync_membership_snapshots` table
+        #
+        # This would only happen if someone was state reset out of the room
+        user_ids_to_delete_membership_snapshots = [
+            state_key
+            for event_type, state_key in to_delete
+            if event_type == EventTypes.Member and self.is_mine_id(state_key)
+        ]
+
+        membership_snapshot_shared_insert_values: SlidingSyncMembershipSnapshotSharedInsertValues = {}
+        membership_infos_to_insert_membership_snapshots: List[
+            SlidingSyncMembershipInfo
+        ] = []
+        if to_insert:
+            membership_event_id_to_user_id_map: Dict[str, str] = {}
+            for state_key, event_id in to_insert.items():
+                if state_key[0] == EventTypes.Member and self.is_mine_id(state_key[1]):
+                    membership_event_id_to_user_id_map[event_id] = state_key[1]
+
+            membership_event_map: Dict[str, EventBase] = {}
+            # In normal event persist scenarios, we should be able to find the
+            # membership events in the `events_and_contexts` given to us but it's
+            # possible a state reset happened which added us to the room without a
+            # corresponding new membership event (reset back to a previous membership).
+            missing_membership_event_ids: Set[str] = set()
+            for membership_event_id in membership_event_id_to_user_id_map.keys():
+                membership_event = event_map.get(membership_event_id)
+                if membership_event:
+                    membership_event_map[membership_event_id] = membership_event
+                else:
+                    missing_membership_event_ids.add(membership_event_id)
+
+            # Otherwise, we need to look up the events that we were reset to.
+            if missing_membership_event_ids:
+                remaining_events = await self.store.get_events(
+                    missing_membership_event_ids
+                )
+                # There shouldn't be any missing events
+                assert (
+                    remaining_events.keys() == missing_membership_event_ids
+                ), missing_membership_event_ids.difference(remaining_events.keys())
+                membership_event_map.update(remaining_events)
+
+            for (
+                membership_event_id,
+                user_id,
+            ) in membership_event_id_to_user_id_map.items():
+                membership_infos_to_insert_membership_snapshots.append(
+                    # XXX: We don't use `SlidingSyncMembershipInfoWithEventPos` here
+                    # because we're sourcing the event from `events_and_contexts`, we
+                    # can't rely on `stream_ordering`/`instance_name` being correct at
+                    # this point. We could be working with events that were previously
+                    # persisted as an `outlier` with one `stream_ordering` but are now
+                    # being persisted again and de-outliered and assigned a different
+                    # `stream_ordering` that won't end up being used. Since we call
+                    # `_calculate_sliding_sync_table_changes()` before
+                    # `_update_outliers_txn()` which fixes this discrepancy (always use
+                    # the `stream_ordering` from the first time it was persisted), we're
+                    # working with an unreliable `stream_ordering` value that will
+                    # possibly be unused and not make it into the `events` table.
+                    SlidingSyncMembershipInfo(
+                        user_id=user_id,
+                        sender=membership_event_map[membership_event_id].sender,
+                        membership_event_id=membership_event_id,
+                        membership=membership_event_map[membership_event_id].membership,
+                    )
+                )
+
+            if membership_infos_to_insert_membership_snapshots:
+                current_state_ids_map: MutableStateMap[str] = dict(
+                    await self.store.get_partial_filtered_current_state_ids(
+                        room_id,
+                        state_filter=StateFilter.from_types(
+                            SLIDING_SYNC_RELEVANT_STATE_SET
+                        ),
+                    )
+                )
+                # Since we fetched the current state before we took `to_insert`/`to_delete`
+                # into account, we need to do a couple fixups.
+                #
+                # Update the current_state_map with what we have `to_delete`
+                for state_key in to_delete:
+                    current_state_ids_map.pop(state_key, None)
+                # Update the current_state_map with what we have `to_insert`
+                for state_key, event_id in to_insert.items():
+                    if state_key in SLIDING_SYNC_RELEVANT_STATE_SET:
+                        current_state_ids_map[state_key] = event_id
+
+                current_state_map: MutableStateMap[EventBase] = {}
+                # In normal event persist scenarios, we probably won't be able to find
+                # these state events in `events_and_contexts` since we don't generally
+                # batch up local membership changes with other events, but it can
+                # happen.
+                missing_state_event_ids: Set[str] = set()
+                for state_key, event_id in current_state_ids_map.items():
+                    event = event_map.get(event_id)
+                    if event:
+                        current_state_map[state_key] = event
+                    else:
+                        missing_state_event_ids.add(event_id)
+
+                # Otherwise, we need to look up the missing events
+                if missing_state_event_ids:
+                    remaining_events = await self.store.get_events(
+                        missing_state_event_ids
+                    )
+                    # There shouldn't be any missing events
+                    assert (
+                        remaining_events.keys() == missing_state_event_ids
+                    ), missing_state_event_ids.difference(remaining_events.keys())
+                    for event in remaining_events.values():
+                        current_state_map[(event.type, event.state_key)] = event
+
+                if current_state_map:
+                    state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+                        current_state_map
+                    )
+                    membership_snapshot_shared_insert_values.update(state_insert_values)
+                    # We have current state to work from
+                    membership_snapshot_shared_insert_values["has_known_state"] = True
+                else:
+                    # We don't have any `current_state_events` anymore (previously
+                    # cleared out because of `no_longer_in_room`). This can happen if
+                    # one user is joined and another is invited (some non-join
+                    # membership). If the joined user leaves, we are `no_longer_in_room`
+                    # and `current_state_events` is cleared out. When the invited user
+                    # rejects the invite (leaves the room), we will end up here.
+                    #
+                    # In these cases, we should inherit the metadata from the previous
+                    # snapshot so we shouldn't update any of the state values. When
+                    # using sliding sync filters, this will prevent the room from
+                    # disappearing/appearing just because you left the room.
+                    #
+                    # Ideally, we could additionally assert that we're only here for
+                    # valid non-join membership transitions.
+                    assert delta_state.no_longer_in_room
+
+        # Handle gathering info for the `sliding_sync_joined_rooms` table
+        #
+        # We only deal with updating the state-related columns here. The
+        # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event
+        # persisting stack (see
+        # `_update_sliding_sync_tables_with_new_persisted_events_txn()`)
+        #
+        joined_room_updates: SlidingSyncStateInsertValues = {}
+        bump_stamp_to_fully_insert: Optional[int] = None
+        if not delta_state.no_longer_in_room:
+            current_state_ids_map = {}
+
+            # Always fully-insert rows if they don't already exist in the
+            # `sliding_sync_joined_rooms` table. This way we can rely on a row if it
+            # exists in the table.
+            #
+            # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+            # foreground update for
+            # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+            # https://github.com/element-hq/synapse/issues/17623)
+            existing_row_in_table = await self.store.db_pool.simple_select_one_onecol(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                retcol="room_id",
+                allow_none=True,
+            )
+            if not existing_row_in_table:
+                most_recent_bump_event_pos_results = (
+                    await self.store.get_last_event_pos_in_room(
+                        room_id,
+                        event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES,
+                    )
+                )
+                if most_recent_bump_event_pos_results is not None:
+                    _, new_bump_event_pos = most_recent_bump_event_pos_results
+
+                    # If we've just joined a remote room, then the last bump event may
+                    # have been backfilled (and so have a negative stream ordering).
+                    # These negative stream orderings can't sensibly be compared, so
+                    # instead just leave it as `None` in the table and we will use the user's
+                    # membership event position as the bump event position in the
+                    # Sliding Sync API.
+                    if new_bump_event_pos.stream > 0:
+                        bump_stamp_to_fully_insert = new_bump_event_pos.stream
+
+                current_state_ids_map = dict(
+                    await self.store.get_partial_filtered_current_state_ids(
+                        room_id,
+                        state_filter=StateFilter.from_types(
+                            SLIDING_SYNC_RELEVANT_STATE_SET
+                        ),
+                    )
+                )
+
+            # Look through the items we're going to insert into the current state to see
+            # if there is anything that we care about and should also update in the
+            # `sliding_sync_joined_rooms` table.
+            for state_key, event_id in to_insert.items():
+                if state_key in SLIDING_SYNC_RELEVANT_STATE_SET:
+                    current_state_ids_map[state_key] = event_id
+
+            # Get the full event objects for the current state events
+            #
+            # In normal event persist scenarios, we should be able to find the state
+            # events in the `events_and_contexts` given to us, but it's possible a state
+            # reset happened that took the room back to a previous state.
+            current_state_map = {}
+            missing_event_ids: Set[str] = set()
+            for state_key, event_id in current_state_ids_map.items():
+                event = event_map.get(event_id)
+                if event:
+                    current_state_map[state_key] = event
+                else:
+                    missing_event_ids.add(event_id)
+
+            # Otherwise, we need to find a couple of events that we were reset to.
+            if missing_event_ids:
+                remaining_events = await self.store.get_events(missing_event_ids)
+                # There shouldn't be any missing events
+                assert (
+                    remaining_events.keys() == missing_event_ids
+                ), missing_event_ids.difference(remaining_events.keys())
+                for event in remaining_events.values():
+                    current_state_map[(event.type, event.state_key)] = event
+
+            joined_room_updates = (
+                PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+                    current_state_map
+                )
+            )
+
+            # If something is being deleted from the state, we need to clear it out
+            for state_key in to_delete:
+                if state_key == (EventTypes.Create, ""):
+                    joined_room_updates["room_type"] = None
+                elif state_key == (EventTypes.RoomEncryption, ""):
+                    joined_room_updates["is_encrypted"] = False
+                elif state_key == (EventTypes.Name, ""):
+                    joined_room_updates["room_name"] = None
+
+        return SlidingSyncTableChanges(
+            room_id=room_id,
+            # For `sliding_sync_joined_rooms`
+            joined_room_bump_stamp_to_fully_insert=bump_stamp_to_fully_insert,
+            joined_room_updates=joined_room_updates,
+            # For `sliding_sync_membership_snapshots`
+            membership_snapshot_shared_insert_values=membership_snapshot_shared_insert_values,
+            to_insert_membership_snapshots=membership_infos_to_insert_membership_snapshots,
+            to_delete_membership_snapshots=user_ids_to_delete_membership_snapshots,
+        )
+
     async def calculate_chain_cover_index_for_events(
         self, room_id: str, events: Collection[EventBase]
     ) -> Dict[str, NewEventChainLinks]:
@@ -315,7 +730,7 @@ class PersistEventsStore:
             keyvalues={},
             retcols=("event_id",),
         )
-        already_persisted_events = {event_id for event_id, in rows}
+        already_persisted_events = {event_id for (event_id,) in rows}
         state_events = [
             event
             for event in state_events
@@ -458,6 +873,7 @@ class PersistEventsStore:
         state_delta_for_room: Optional[DeltaState],
         new_forward_extremities: Optional[Set[str]],
         new_event_links: Dict[str, NewEventChainLinks],
+        sliding_sync_table_changes: Optional[SlidingSyncTableChanges],
     ) -> None:
         """Insert some number of room events into the necessary database tables.
 
@@ -478,9 +894,14 @@ class PersistEventsStore:
             delete_existing True to purge existing table rows for the events
                 from the database. This is useful when retrying due to
                 IntegrityError.
-            state_delta_for_room: The current-state delta for the room.
+            state_delta_for_room: Deltas that are going to be used to update the
+                `current_state_events` table. Changes to the current state of the room.
             new_forward_extremities: The new forward extremities for the room:
                 a set of the event ids which are the forward extremities.
+            sliding_sync_table_changes: Changes to the
+                `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables
+                derived from the given `state_delta_for_room` (see
+                `_calculate_sliding_sync_table_changes(...)`)
 
         Raises:
             PartialStateConflictError: if attempting to persist a partial state event in
@@ -590,10 +1011,22 @@ class PersistEventsStore:
         # room_memberships, where applicable.
         # NB: This function invalidates all state related caches
         if state_delta_for_room:
+            # If the state delta exists, the sliding sync table changes should also exist
+            assert sliding_sync_table_changes is not None
+
             self._update_current_state_txn(
-                txn, room_id, state_delta_for_room, min_stream_order
+                txn,
+                room_id,
+                state_delta_for_room,
+                min_stream_order,
+                sliding_sync_table_changes,
             )
 
+        # We only update the sliding sync tables for non-backfilled events.
+        self._update_sliding_sync_tables_with_new_persisted_events_txn(
+            txn, room_id, events_and_contexts
+        )
+
     def _persist_event_auth_chain_txn(
         self,
         txn: LoggingTransaction,
@@ -1128,8 +1561,20 @@ class PersistEventsStore:
         self,
         room_id: str,
         state_delta: DeltaState,
+        sliding_sync_table_changes: SlidingSyncTableChanges,
     ) -> None:
-        """Update the current state stored in the datatabase for the given room"""
+        """
+        Update the current state stored in the database for the given room
+
+        Args:
+            room_id
+            state_delta: Deltas that are going to be used to update the
+                `current_state_events` table. Changes to the current state of the room.
+            sliding_sync_table_changes: Changes to the
+                `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables
+                derived from the given `state_delta` (see
+                `_calculate_sliding_sync_table_changes(...)`)
+        """
 
         if state_delta.is_noop():
             return
@@ -1141,6 +1586,7 @@ class PersistEventsStore:
                 room_id,
                 delta_state=state_delta,
                 stream_id=stream_ordering,
+                sliding_sync_table_changes=sliding_sync_table_changes,
             )
 
     def _update_current_state_txn(
@@ -1149,16 +1595,34 @@ class PersistEventsStore:
         room_id: str,
         delta_state: DeltaState,
         stream_id: int,
+        sliding_sync_table_changes: SlidingSyncTableChanges,
     ) -> None:
+        """
+        Handles updating tables that track the current state of a room.
+
+        Args:
+            txn
+            room_id
+            delta_state: Deltas that are going to be used to update the
+                `current_state_events` table. Changes to the current state of the room.
+            stream_id: The stream ordering to record these current state changes at
+                (the minimum stream ordering of the batch of events being persisted).
+            sliding_sync_table_changes: Changes to the
+                `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables
+                derived from the given `delta_state` (see
+                `_calculate_sliding_sync_table_changes(...)`)
+        """
         to_delete = delta_state.to_delete
         to_insert = delta_state.to_insert
 
+        # Sanity check we're processing the same thing
+        assert room_id == sliding_sync_table_changes.room_id
+
         # Figure out the changes of membership to invalidate the
         # `get_rooms_for_user` cache.
         # We find out which membership events we may have deleted
         # and which we have added, then we invalidate the caches for all
         # those users.
-        members_changed = {
+        members_to_cache_bust = {
             state_key
             for ev_type, state_key in itertools.chain(to_delete, to_insert)
             if ev_type == EventTypes.Member
@@ -1182,16 +1646,22 @@ class PersistEventsStore:
                 """
             txn.execute(sql, (stream_id, self._instance_name, room_id))
 
+            # Grab the list of users before we clear out the current state
+            users_in_room = self.store.get_users_in_room_txn(txn, room_id)
             # We also want to invalidate the membership caches for users
             # that were in the room.
-            users_in_room = self.store.get_users_in_room_txn(txn, room_id)
-            members_changed.update(users_in_room)
+            members_to_cache_bust.update(users_in_room)
 
             self.db_pool.simple_delete_txn(
                 txn,
                 table="current_state_events",
                 keyvalues={"room_id": room_id},
             )
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+            )
         else:
             # We're still in the room, so we update the current state as normal.
 
@@ -1260,6 +1730,63 @@ class PersistEventsStore:
                 ],
             )
 
+            # Handle updating the `sliding_sync_joined_rooms` table. We only deal with
+            # updating the state related columns. The
+            # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event
+            # persisting stack (see
+            # `_update_sliding_sync_tables_with_new_persisted_events_txn()`)
+            #
+            # We only need to update when one of the relevant state values has changed
+            if sliding_sync_table_changes.joined_room_updates:
+                sliding_sync_updates_keys = (
+                    sliding_sync_table_changes.joined_room_updates.keys()
+                )
+                sliding_sync_updates_values = (
+                    sliding_sync_table_changes.joined_room_updates.values()
+                )
+
+                args: List[Any] = [
+                    room_id,
+                    room_id,
+                    sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert,
+                ]
+                args.extend(iter(sliding_sync_updates_values))
+
+                # XXX: We use a sub-query for `stream_ordering` because it's unreliable to
+                # pre-calculate from `events_and_contexts` at the time when
+                # `_calculate_sliding_sync_table_changes()` is run. We could be working
+                # with events that were previously persisted as an `outlier` with one
+                # `stream_ordering` but are now being persisted again and de-outliered
+                # and assigned a different `stream_ordering`. Since we call
+                # `_calculate_sliding_sync_table_changes()` before
+                # `_update_outliers_txn()` which fixes this discrepancy (always use the
+                # `stream_ordering` from the first time it was persisted), we're working
+                # with an unreliable `stream_ordering` value that will possibly be
+                # unused and not make it into the `events` table.
+                #
+                # We don't update `event_stream_ordering` `ON CONFLICT` because it's
+                # simpler and we can just rely on
+                # `_update_sliding_sync_tables_with_new_persisted_events_txn()` to do
+                # the right thing (same for `bump_stamp`). The only reason we're
+                # inserting `event_stream_ordering` here is because the column has a
+                # `NOT NULL` constraint and we need to provide some value.
+                txn.execute(
+                    f"""
+                    INSERT INTO sliding_sync_joined_rooms
+                        (room_id, event_stream_ordering, bump_stamp, {", ".join(sliding_sync_updates_keys)})
+                    VALUES (
+                        ?,
+                        (SELECT stream_ordering FROM events WHERE room_id = ? ORDER BY stream_ordering DESC LIMIT 1),
+                        ?,
+                        {", ".join("?" for _ in sliding_sync_updates_values)}
+                    )
+                    ON CONFLICT (room_id)
+                    DO UPDATE SET
+                        {", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_updates_keys)}
+                    """,
+                    args,
+                )
+
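For reference, a standalone sketch (not part of this change) of what the dynamically built upsert above renders to, assuming a hypothetical `joined_room_updates` containing `room_name` and `is_encrypted`; the table and column names come from the statement above, while the sample values are made up:

    # Illustrative only: mirrors the f-string construction in the change above.
    sample_updates = {"room_name": "My room", "is_encrypted": True}
    keys = list(sample_updates.keys())

    rendered_sql = f"""
    INSERT INTO sliding_sync_joined_rooms
        (room_id, event_stream_ordering, bump_stamp, {", ".join(keys)})
    VALUES (
        ?,
        (SELECT stream_ordering FROM events WHERE room_id = ? ORDER BY stream_ordering DESC LIMIT 1),
        ?,
        {", ".join("?" for _ in keys)}
    )
    ON CONFLICT (room_id)
    DO UPDATE SET
        {", ".join(f"{key} = EXCLUDED.{key}" for key in keys)}
    """
    # The positional args would then line up as:
    #   [room_id, room_id, bump_stamp_to_fully_insert, "My room", True]
    print(rendered_sql)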
         # We now update `local_current_membership`. We do this regardless
         # of whether we're still in the room or not to handle the case where
         # e.g. we just got banned (where we need to record that fact here).
@@ -1296,6 +1823,81 @@ class PersistEventsStore:
                 ],
             )
 
+        # Handle updating the `sliding_sync_membership_snapshots` table
+        #
+        # This would only happen if someone was state reset out of the room
+        if sliding_sync_table_changes.to_delete_membership_snapshots:
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="sliding_sync_membership_snapshots",
+                column="user_id",
+                values=sliding_sync_table_changes.to_delete_membership_snapshots,
+                keyvalues={"room_id": room_id},
+            )
+
+        # We do this regardless of whether the server is `no_longer_in_room` or not
+        # because we still want a row if a local user just left, was kicked, or got banned
+        # from the room.
+        if sliding_sync_table_changes.to_insert_membership_snapshots:
+            # Update the `sliding_sync_membership_snapshots` table
+            #
+            sliding_sync_snapshot_keys = sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys()
+            sliding_sync_snapshot_values = sliding_sync_table_changes.membership_snapshot_shared_insert_values.values()
+            # We need to insert/update regardless of whether we have
+            # `sliding_sync_snapshot_keys` because there are other fields in the `ON
+            # CONFLICT` upsert to run (see the inherit case explained in
+            # `_calculate_sliding_sync_table_changes()` for more context on when this
+            # happens).
+            #
+            # XXX: We use a sub-query for `stream_ordering` because it's unreliable to
+            # pre-calculate from `events_and_contexts` at the time when
+            # `_calculate_sliding_sync_table_changes()` is run. We could be working with
+            # events that were previously persisted as an `outlier` with one
+            # `stream_ordering` but are now being persisted again and de-outliered and
+            # assigned a different `stream_ordering` that won't end up being used. Since
+            # we call `_calculate_sliding_sync_table_changes()` before
+            # `_update_outliers_txn()` which fixes this discrepancy (always use the
+            # `stream_ordering` from the first time it was persisted), we're working
+            # with an unreliable `stream_ordering` value that will possibly be unused
+            # and not make it into the `events` table.
+            txn.execute_batch(
+                f"""
+                INSERT INTO sliding_sync_membership_snapshots
+                    (room_id, user_id, sender, membership_event_id, membership, event_stream_ordering, event_instance_name
+                    {("," + ", ".join(sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""})
+                VALUES (
+                    ?, ?, ?, ?, ?,
+                    (SELECT stream_ordering FROM events WHERE event_id = ?),
+                    (SELECT COALESCE(instance_name, 'master') FROM events WHERE event_id = ?)
+                    {("," + ", ".join("?" for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""}
+                )
+                ON CONFLICT (room_id, user_id)
+                DO UPDATE SET
+                    sender = EXCLUDED.sender,
+                    membership_event_id = EXCLUDED.membership_event_id,
+                    membership = EXCLUDED.membership,
+                    event_stream_ordering = EXCLUDED.event_stream_ordering
+                    {("," + ", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""}
+                """,
+                [
+                    [
+                        room_id,
+                        membership_info.user_id,
+                        membership_info.sender,
+                        membership_info.membership_event_id,
+                        membership_info.membership,
+                        # XXX: We do not use `membership_info.membership_event_stream_ordering` here
+                        # because it is an unreliable value. See XXX note above.
+                        membership_info.membership_event_id,
+                        # XXX: We do not use `membership_info.membership_event_instance_name` here
+                        # because it is an unreliable value. See XXX note above.
+                        membership_info.membership_event_id,
+                    ]
+                    + list(sliding_sync_snapshot_values)
+                    for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots
+                ],
+            )
+
         txn.call_after(
             self.store._curr_state_delta_stream_cache.entity_has_changed,
             room_id,
@@ -1303,14 +1905,330 @@ class PersistEventsStore:
         )
 
         # Invalidate the various caches
-        self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed)
+        self.store._invalidate_state_caches_and_stream(
+            txn, room_id, members_to_cache_bust
+        )
 
         # Check if any of the remote membership changes requires us to
         # unsubscribe from their device lists.
         self.store.handle_potentially_left_users_txn(
-            txn, {m for m in members_changed if not self.hs.is_mine_id(m)}
+            txn, {m for m in members_to_cache_bust if not self.hs.is_mine_id(m)}
         )
 
+    @classmethod
+    def _get_relevant_sliding_sync_current_state_event_ids_txn(
+        cls, txn: LoggingTransaction, room_id: str
+    ) -> MutableStateMap[str]:
+        """
+        Fetch the current state event IDs for the state types relevant to the
+        `sliding_sync_joined_rooms` table for the given room.
+
+        Returns:
+            A StateMap of the event IDs needed to fetch the relevant state values
+            to insert into the
+            `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+        """
+        # Fetch the current state event IDs from the database
+        (
+            event_type_and_state_key_in_list_clause,
+            event_type_and_state_key_args,
+        ) = make_tuple_in_list_sql_clause(
+            txn.database_engine,
+            ("type", "state_key"),
+            SLIDING_SYNC_RELEVANT_STATE_SET,
+        )
+        txn.execute(
+            f"""
+            SELECT c.event_id, c.type, c.state_key
+            FROM current_state_events AS c
+            WHERE
+                c.room_id = ?
+                AND {event_type_and_state_key_in_list_clause}
+            """,
+            [room_id] + event_type_and_state_key_args,
+        )
+        current_state_map: MutableStateMap[str] = {
+            (event_type, state_key): event_id for event_id, event_type, state_key in txn
+        }
+
+        return current_state_map
+
+    @classmethod
+    def _get_sliding_sync_insert_values_from_state_map(
+        cls, state_map: StateMap[EventBase]
+    ) -> SlidingSyncStateInsertValues:
+        """
+        Extract the relevant state values from the `state_map` needed to insert into the
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+
+        Returns:
+            Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant
+            state values needed to insert into
+            the `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+        """
+        # Map of values to insert/update in the `sliding_sync_membership_snapshots` table
+        sliding_sync_insert_map: SlidingSyncStateInsertValues = {}
+
+        # Parse the raw event JSON
+        for state_key, event in state_map.items():
+            if state_key == (EventTypes.Create, ""):
+                room_type = event.content.get(EventContentFields.ROOM_TYPE)
+                # Scrutinize JSON values
+                if room_type is None or (
+                    isinstance(room_type, str)
+                    # We ignore values with null bytes as Postgres doesn't allow them in
+                    # text columns.
+                    and "\0" not in room_type
+                ):
+                    sliding_sync_insert_map["room_type"] = room_type
+            elif state_key == (EventTypes.RoomEncryption, ""):
+                encryption_algorithm = event.content.get(
+                    EventContentFields.ENCRYPTION_ALGORITHM
+                )
+                is_encrypted = encryption_algorithm is not None
+                sliding_sync_insert_map["is_encrypted"] = is_encrypted
+            elif state_key == (EventTypes.Name, ""):
+                room_name = event.content.get(EventContentFields.ROOM_NAME)
+                # Scrutinize JSON values
+                if room_name is None or (
+                    isinstance(room_name, str)
+                    # We ignore values with null bytes as Postgres doesn't allow them in
+                    # text columns.
+                    and "\0" not in room_name
+                ):
+                    sliding_sync_insert_map["room_name"] = room_name
+            elif state_key == (EventTypes.Tombstone, ""):
+                successor_room_id = event.content.get(
+                    EventContentFields.TOMBSTONE_SUCCESSOR_ROOM
+                )
+                # Scrutinize JSON values
+                if successor_room_id is None or (
+                    isinstance(successor_room_id, str)
+                    # We ignore values with null bytes as Postgres doesn't allow them in
+                    # text columns.
+                    and "\0" not in successor_room_id
+                ):
+                    sliding_sync_insert_map["tombstone_successor_room_id"] = (
+                        successor_room_id
+                    )
+            else:
+                # We only expect to see events according to the
+                # `SLIDING_SYNC_RELEVANT_STATE_SET`.
+                raise AssertionError(
+                    "Unexpected event (we should not be fetching extra events or this "
+                    + "piece of code needs to be updated to handle a new event type added "
+                    + "to `SLIDING_SYNC_RELEVANT_STATE_SET`): {state_key} {event.event_id}"
+                )
+
+        return sliding_sync_insert_map
+
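As a rough illustration of the extraction above, here is a minimal, self-contained sketch (not part of this change) that applies the same per-event-type rules to a hand-built state map; `FakeEvent` stands in for `EventBase`, and the null-byte and tombstone handling is omitted:

    from collections import namedtuple

    # Stand-in for EventBase: only the `content` field the helper above reads.
    FakeEvent = namedtuple("FakeEvent", ["event_id", "content"])

    state_map = {
        ("m.room.create", ""): FakeEvent("$create", {"type": "m.space"}),
        ("m.room.encryption", ""): FakeEvent("$enc", {"algorithm": "m.megolm.v1.aes-sha2"}),
        ("m.room.name", ""): FakeEvent("$name", {"name": "My room"}),
    }

    insert_values = {}
    for (event_type, _state_key), event in state_map.items():
        if event_type == "m.room.create":
            insert_values["room_type"] = event.content.get("type")
        elif event_type == "m.room.encryption":
            insert_values["is_encrypted"] = event.content.get("algorithm") is not None
        elif event_type == "m.room.name":
            insert_values["room_name"] = event.content.get("name")

    assert insert_values == {
        "room_type": "m.space",
        "is_encrypted": True,
        "room_name": "My room",
    }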
+    @classmethod
+    def _get_sliding_sync_insert_values_from_stripped_state(
+        cls, unsigned_stripped_state_events: Any
+    ) -> SlidingSyncMembershipSnapshotSharedInsertValues:
+        """
+        Pull out the relevant state values from the stripped state of an invite or knock
+        membership event, needed to insert into the `sliding_sync_membership_snapshots`
+        table.
+
+        Returns:
+            Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant
+            state values needed to insert into the `sliding_sync_membership_snapshots` table.
+        """
+        # Map of values to insert/update in the `sliding_sync_membership_snapshots` table
+        sliding_sync_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {}
+
+        if unsigned_stripped_state_events is not None:
+            stripped_state_map: MutableStateMap[StrippedStateEvent] = {}
+            if isinstance(unsigned_stripped_state_events, list):
+                for raw_stripped_event in unsigned_stripped_state_events:
+                    stripped_state_event = parse_stripped_state_event(
+                        raw_stripped_event
+                    )
+                    if stripped_state_event is not None:
+                        stripped_state_map[
+                            (
+                                stripped_state_event.type,
+                                stripped_state_event.state_key,
+                            )
+                        ] = stripped_state_event
+
+            # If there is some stripped state, we assume the remote server passed *all*
+            # of the potential stripped state events for the room.
+            create_stripped_event = stripped_state_map.get((EventTypes.Create, ""))
+            # Sanity check that we at least have the create event
+            if create_stripped_event is not None:
+                sliding_sync_insert_map["has_known_state"] = True
+
+                # XXX: Keep this up-to-date with `SLIDING_SYNC_RELEVANT_STATE_SET`
+
+                # Find the room_type
+                sliding_sync_insert_map["room_type"] = (
+                    create_stripped_event.content.get(EventContentFields.ROOM_TYPE)
+                    if create_stripped_event is not None
+                    else None
+                )
+
+                # Find whether the room is_encrypted
+                encryption_stripped_event = stripped_state_map.get(
+                    (EventTypes.RoomEncryption, "")
+                )
+                encryption = (
+                    encryption_stripped_event.content.get(
+                        EventContentFields.ENCRYPTION_ALGORITHM
+                    )
+                    if encryption_stripped_event is not None
+                    else None
+                )
+                sliding_sync_insert_map["is_encrypted"] = encryption is not None
+
+                # Find the room_name
+                room_name_stripped_event = stripped_state_map.get((EventTypes.Name, ""))
+                sliding_sync_insert_map["room_name"] = (
+                    room_name_stripped_event.content.get(EventContentFields.ROOM_NAME)
+                    if room_name_stripped_event is not None
+                    else None
+                )
+
+                # Check for null bytes in the room name and type. We have to
+                # ignore values with null bytes as Postgres doesn't allow them
+                # in text columns.
+                if (
+                    sliding_sync_insert_map["room_name"] is not None
+                    and "\0" in sliding_sync_insert_map["room_name"]
+                ):
+                    sliding_sync_insert_map.pop("room_name")
+
+                if (
+                    sliding_sync_insert_map["room_type"] is not None
+                    and "\0" in sliding_sync_insert_map["room_type"]
+                ):
+                    sliding_sync_insert_map.pop("room_type")
+
+                # Find the tombstone_successor_room_id
+                # Note: This isn't one of the stripped state events according to the spec,
+                # but there seems to be no reason not to support it.
+                tombstone_stripped_event = stripped_state_map.get(
+                    (EventTypes.Tombstone, "")
+                )
+                sliding_sync_insert_map["tombstone_successor_room_id"] = (
+                    tombstone_stripped_event.content.get(
+                        EventContentFields.TOMBSTONE_SUCCESSOR_ROOM
+                    )
+                    if tombstone_stripped_event is not None
+                    else None
+                )
+
+                if (
+                    sliding_sync_insert_map["tombstone_successor_room_id"] is not None
+                    and "\0" in sliding_sync_insert_map["tombstone_successor_room_id"]
+                ):
+                    sliding_sync_insert_map.pop("tombstone_successor_room_id")
+
+            else:
+                # The stripped state didn't contain a create event (or was malformed),
+                # so we don't know anything about the room state
+                sliding_sync_insert_map["has_known_state"] = False
+                sliding_sync_insert_map["room_type"] = None
+                sliding_sync_insert_map["room_name"] = None
+                sliding_sync_insert_map["is_encrypted"] = False
+        else:
+            # No stripped state provided
+            sliding_sync_insert_map["has_known_state"] = False
+            sliding_sync_insert_map["room_type"] = None
+            sliding_sync_insert_map["room_name"] = None
+            sliding_sync_insert_map["is_encrypted"] = False
+
+        return sliding_sync_insert_map
+
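A small standalone sketch of the stripped-state handling above (not part of this change); the raw stripped events are hand-written, and only the `has_known_state` distinction and the room name lookup are shown:

    # Hand-written stripped events, shaped like an invite's `unsigned.invite_room_state`.
    invite_room_state = [
        {"type": "m.room.create", "state_key": "", "sender": "@alice:example.org",
         "content": {"creator": "@alice:example.org"}},
        {"type": "m.room.name", "state_key": "", "sender": "@alice:example.org",
         "content": {"name": "My room"}},
    ]

    stripped_by_type = {ev["type"]: ev for ev in invite_room_state}

    # A create event is present, so the room state counts as "known".
    has_known_state = "m.room.create" in stripped_by_type
    room_name = stripped_by_type.get("m.room.name", {}).get("content", {}).get("name")

    assert has_known_state is True
    assert room_name == "My room"
    # With no stripped state (or no create event), the helper instead records
    # has_known_state=False and leaves the other columns at their defaults.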
+    def _update_sliding_sync_tables_with_new_persisted_events_txn(
+        self,
+        txn: LoggingTransaction,
+        room_id: str,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+    ) -> None:
+        """
+        Update the latest `event_stream_ordering`/`bump_stamp` columns in the
+        `sliding_sync_joined_rooms` table for the room with new events.
+
+        This function assumes that `_store_event_txn()` (to persist the event) and
+        `_update_current_state_txn(...)` (so that `sliding_sync_joined_rooms` table has
+        been updated with rooms that were joined) have already been run.
+
+        Args:
+            txn
+            room_id: The room that all of the events belong to
+            events_and_contexts: The events being persisted. We assume the list is
+                sorted ascending by `stream_ordering`. We don't care about the sort when the
+                events are backfilled (with negative `stream_ordering`).
+        """
+
+        # Nothing to do if there are no events
+        if len(events_and_contexts) == 0:
+            return
+
+        # Since the list is sorted ascending by `stream_ordering`, the last event should
+        # have the highest `stream_ordering`.
+        max_stream_ordering = events_and_contexts[-1][
+            0
+        ].internal_metadata.stream_ordering
+        # `stream_ordering` should be assigned for persisted events
+        assert max_stream_ordering is not None
+        # Check if the event is a backfilled event (with a negative `stream_ordering`).
+        # If one event is backfilled, we assume this whole batch was backfilled.
+        if max_stream_ordering < 0:
+            # We only update the sliding sync tables for non-backfilled events.
+            return
+
+        max_bump_stamp = None
+        for event, _ in reversed(events_and_contexts):
+            # Sanity check that all events belong to the same room
+            assert event.room_id == room_id
+
+            if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES:
+                # `stream_ordering` should be assigned for persisted events
+                assert event.internal_metadata.stream_ordering is not None
+
+                max_bump_stamp = event.internal_metadata.stream_ordering
+
+                # Since we're iterating in reverse, we can break as soon as we find a
+                # matching bump event which should have the highest `stream_ordering`.
+                break
+
+        # Handle updating the `sliding_sync_joined_rooms` table.
+        #
+        txn.execute(
+            """
+            UPDATE sliding_sync_joined_rooms
+            SET
+                event_stream_ordering = CASE
+                    WHEN event_stream_ordering IS NULL OR event_stream_ordering < ?
+                        THEN ?
+                    ELSE event_stream_ordering
+                END,
+                bump_stamp = CASE
+                    WHEN bump_stamp IS NULL OR bump_stamp < ?
+                        THEN ?
+                    ELSE bump_stamp
+                END
+            WHERE room_id = ?
+            """,
+            (
+                max_stream_ordering,
+                max_stream_ordering,
+                max_bump_stamp,
+                max_bump_stamp,
+                room_id,
+            ),
+        )
+        # This may or may not update any rows, depending on whether we are `no_longer_in_room`
+
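The `CASE` expressions above only ever move the two columns forward. A self-contained sketch of that behaviour (not part of this change) using the standard-library `sqlite3` module and a toy table:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE joined_rooms "
        "(room_id TEXT PRIMARY KEY, event_stream_ordering INTEGER, bump_stamp INTEGER)"
    )
    conn.execute("INSERT INTO joined_rooms VALUES ('!room', 10, NULL)")


    def bump(max_stream_ordering, max_bump_stamp):
        # Same shape as the UPDATE above: only overwrite a column when the new
        # value is larger (or the column is still NULL).
        conn.execute(
            """
            UPDATE joined_rooms
            SET
                event_stream_ordering = CASE
                    WHEN event_stream_ordering IS NULL OR event_stream_ordering < ? THEN ?
                    ELSE event_stream_ordering
                END,
                bump_stamp = CASE
                    WHEN bump_stamp IS NULL OR bump_stamp < ? THEN ?
                    ELSE bump_stamp
                END
            WHERE room_id = ?
            """,
            (max_stream_ordering, max_stream_ordering, max_bump_stamp, max_bump_stamp, "!room"),
        )


    bump(15, 12)   # moves both columns forward
    bump(5, None)  # stale values leave the row untouched
    assert conn.execute(
        "SELECT event_stream_ordering, bump_stamp FROM joined_rooms"
    ).fetchone() == (15, 12)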
     def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None:
         """Update the room version in the database based off current state
         events.
@@ -1931,7 +2849,9 @@ class PersistEventsStore:
         )
 
         for event in events:
+            # Sanity check that we're working with persisted events
             assert event.internal_metadata.stream_ordering is not None
+            assert event.internal_metadata.instance_name is not None
 
             # We update the local_current_membership table only if the event is
             # "current", i.e., its something that has just happened.
@@ -1945,6 +2865,16 @@ class PersistEventsStore:
                 and event.internal_metadata.is_outlier()
                 and event.internal_metadata.is_out_of_band_membership()
             ):
+                # The only sort of out-of-band membership events we expect to see here
+                # are remote invites/knocks and LEAVE events corresponding to
+                # rejected/retracted invites and rescinded knocks.
+                assert event.type == EventTypes.Member
+                assert event.membership in (
+                    Membership.INVITE,
+                    Membership.KNOCK,
+                    Membership.LEAVE,
+                )
+
                 self.db_pool.simple_upsert_txn(
                     txn,
                     table="local_current_membership",
@@ -1956,6 +2886,56 @@ class PersistEventsStore:
                     },
                 )
 
+                # Handle updating the `sliding_sync_membership_snapshots` table
+                # (out-of-band membership events only)
+                #
+                raw_stripped_state_events = None
+                if event.membership == Membership.INVITE:
+                    invite_room_state = event.unsigned.get("invite_room_state")
+                    raw_stripped_state_events = invite_room_state
+                elif event.membership == Membership.KNOCK:
+                    knock_room_state = event.unsigned.get("knock_room_state")
+                    raw_stripped_state_events = knock_room_state
+
+                insert_values = {
+                    "sender": event.sender,
+                    "membership_event_id": event.event_id,
+                    "membership": event.membership,
+                    "event_stream_ordering": event.internal_metadata.stream_ordering,
+                    "event_instance_name": event.internal_metadata.instance_name,
+                }
+                if event.membership == Membership.LEAVE:
+                    # Inherit the metadata from the remote invite/knock. When using
+                    # sliding sync filters, this will prevent the room from
+                    # disappearing/appearing just because you left the room.
+                    pass
+                elif event.membership in (Membership.INVITE, Membership.KNOCK):
+                    extra_insert_values = (
+                        self._get_sliding_sync_insert_values_from_stripped_state(
+                            raw_stripped_state_events
+                        )
+                    )
+                    insert_values.update(extra_insert_values)
+                else:
+                    # We don't know how to handle this type of membership yet
+                    #
+                    # FIXME: We should use `assert_never` here but for some reason
+                    # the exhaustive matching doesn't recognize the `Never` here.
+                    # assert_never(event.membership)
+                    raise AssertionError(
+                        f"Unexpected out-of-band membership {event.membership} ({event.event_id}) that we don't know how to handle yet"
+                    )
+
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="sliding_sync_membership_snapshots",
+                    keyvalues={
+                        "room_id": event.room_id,
+                        "user_id": event.state_key,
+                    },
+                    values=insert_values,
+                )
+
     def _handle_event_relations(
         self, txn: LoggingTransaction, event: EventBase
     ) -> None:
@@ -2415,7 +3395,7 @@ class PersistEventsStore:
         )
 
         potential_backwards_extremities.difference_update(
-            e for e, in existing_events_outliers
+            e for (e,) in existing_events_outliers
         )
 
         if potential_backwards_extremities:
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 64d303e330..a8723f94bc 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -24,9 +24,9 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
 
 import attr
 
-from synapse.api.constants import EventContentFields, RelationTypes
+from synapse.api.constants import EventContentFields, Membership, RelationTypes
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import make_event_from_dict
+from synapse.events import EventBase, make_event_from_dict
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import (
     DatabasePool,
@@ -34,9 +34,27 @@ from synapse.storage.database import (
     LoggingTransaction,
     make_tuple_comparison_clause,
 )
-from synapse.storage.databases.main.events import PersistEventsStore
+from synapse.storage.databases.main.events import (
+    SLIDING_SYNC_RELEVANT_STATE_SET,
+    PersistEventsStore,
+    SlidingSyncMembershipInfoWithEventPos,
+    SlidingSyncMembershipSnapshotSharedInsertValues,
+    SlidingSyncStateInsertValues,
+)
+from synapse.storage.databases.main.events_worker import (
+    DatabaseCorruptionError,
+    InvalidEventError,
+)
+from synapse.storage.databases.main.state_deltas import StateDeltasStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
+from synapse.storage.engines import PostgresEngine
 from synapse.storage.types import Cursor
-from synapse.types import JsonDict, StrCollection
+from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection
+from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
+from synapse.types.state import StateFilter
+from synapse.types.storage import _BackgroundUpdates
+from synapse.util import json_encoder
+from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -59,26 +77,6 @@ _REPLACE_STREAM_ORDERING_SQL_COMMANDS = (
 )
 
 
-class _BackgroundUpdates:
-    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
-    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
-    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
-    POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2"
-    INDEX_STREAM_ORDERING2 = "index_stream_ordering2"
-    INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url"
-    INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order"
-    INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream"
-    INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts"
-    REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column"
-
-    EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows"
-    EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index"
-
-    EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections"
-
-    EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index"
-
-
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class _CalculateChainCover:
     """Return value for _calculate_chain_cover_txn."""
@@ -97,7 +95,19 @@ class _CalculateChainCover:
     finished_room_map: Dict[str, Tuple[int, int]]
 
 
-class EventsBackgroundUpdatesStore(SQLBaseStore):
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _JoinedRoomStreamOrderingUpdate:
+    """
+    Intermediate container class used in `SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE`
+    """
+
+    # The most recent event stream_ordering for the room
+    most_recent_event_stream_ordering: int
+    # The most recent event `bump_stamp` for the room
+    most_recent_bump_stamp: Optional[int]
+
+
+class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -279,6 +289,34 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             where_clause="NOT outlier",
         )
 
+        # Handle background updates for Sliding Sync tables
+        #
+        self.db_pool.updates.register_background_update_handler(
+            _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+            self._sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update,
+        )
+        # Add some background updates to populate the sliding sync tables
+        self.db_pool.updates.register_background_update_handler(
+            _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+            self._sliding_sync_joined_rooms_bg_update,
+        )
+        self.db_pool.updates.register_background_update_handler(
+            _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+            self._sliding_sync_membership_snapshots_bg_update,
+        )
+
+        # We want this to run on the main database at startup before we start processing
+        # events.
+        #
+        # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+        # foreground update for
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+        # https://github.com/element-hq/synapse/issues/17623)
+        with db_conn.cursor(txn_name="resolve_sliding_sync") as txn:
+            _resolve_stale_data_in_sliding_sync_tables(
+                txn=txn,
+            )
+
     async def _background_reindex_fields_sender(
         self, progress: JsonDict, batch_size: int
     ) -> int:
@@ -586,7 +624,8 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 room_ids = {row[0] for row in rows}
                 for room_id in room_ids:
                     txn.call_after(
-                        self.get_latest_event_ids_in_room.invalidate, (room_id,)  # type: ignore[attr-defined]
+                        self.get_latest_event_ids_in_room.invalidate,  # type: ignore[attr-defined]
+                        (room_id,),
                     )
 
             self.db_pool.simple_delete_many_txn(
@@ -1073,7 +1112,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
         PersistEventsStore._add_chain_cover_index(
             txn,
             self.db_pool,
-            self.event_chain_id_gen,  # type: ignore[attr-defined]
+            self.event_chain_id_gen,
             event_to_room_id,
             event_to_types,
             cast(Dict[str, StrCollection], event_to_auth_chain),
@@ -1516,3 +1555,1137 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             )
 
         return batch_size
+
+    async def _sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update(
+        self, progress: JsonDict, _batch_size: int
+    ) -> int:
+        """
+        Prefill the `sliding_sync_joined_rooms_to_recalculate` table with all rooms we know about already.
+        """
+
+        def _txn(txn: LoggingTransaction) -> None:
+            # We do this as one big bulk insert. This has been tested on a bigger
+            # homeserver with ~10M rooms and took 60s. There is potential for this to
+            # hog disk I/O while it runs.
+            #
+            # We upsert in case we have to run this multiple times.
+            txn.execute(
+                """
+                INSERT INTO sliding_sync_joined_rooms_to_recalculate
+                    (room_id)
+                SELECT DISTINCT room_id FROM local_current_membership
+                WHERE membership = 'join'
+                ON CONFLICT (room_id)
+                DO NOTHING;
+                """,
+            )
+
+        await self.db_pool.runInteraction(
+            "_sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update",
+            _txn,
+        )
+
+        # Background update is done.
+        await self.db_pool.updates._end_background_update(
+            _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+        )
+        return 0
+
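The prefill above is idempotent thanks to the `ON CONFLICT ... DO NOTHING` clause. A standalone sketch of the same pattern (not part of this change) against toy tables in SQLite (requires SQLite >= 3.24):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE local_current_membership (room_id TEXT, user_id TEXT, membership TEXT)"
    )
    conn.execute("CREATE TABLE to_recalculate (room_id TEXT PRIMARY KEY)")
    conn.executemany(
        "INSERT INTO local_current_membership VALUES (?, ?, ?)",
        [("!a", "@u1", "join"), ("!a", "@u2", "join"), ("!b", "@u1", "leave")],
    )

    prefill_sql = """
        INSERT INTO to_recalculate (room_id)
        SELECT DISTINCT room_id FROM local_current_membership
        WHERE membership = 'join'
        ON CONFLICT (room_id) DO NOTHING
    """
    conn.execute(prefill_sql)
    conn.execute(prefill_sql)  # re-running is a no-op thanks to the upsert

    assert conn.execute("SELECT room_id FROM to_recalculate").fetchall() == [("!a",)]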
+    async def _sliding_sync_joined_rooms_bg_update(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """
+        Background update to populate the `sliding_sync_joined_rooms` table.
+        """
+        # We don't need to fetch any progress state because we just grab the next N
+        # rooms in `sliding_sync_joined_rooms_to_recalculate`
+
+        def _get_rooms_to_update_txn(txn: LoggingTransaction) -> List[Tuple[str]]:
+            """
+            Returns:
+                A list of room IDs to update, pulled from the
+                `sliding_sync_joined_rooms_to_recalculate` table.
+            """
+            # Fetch the set of room IDs that we want to update
+            #
+            # We use the `current_state_events` table as the barometer for whether the
+            # server is still participating in the room because if we're
+            # `no_longer_in_room`, this table would be cleared out for the given
+            # `room_id`.
+            txn.execute(
+                """
+                SELECT room_id
+                FROM sliding_sync_joined_rooms_to_recalculate
+                LIMIT ?
+                """,
+                (batch_size,),
+            )
+
+            rooms_to_update_rows = cast(List[Tuple[str]], txn.fetchall())
+
+            return rooms_to_update_rows
+
+        rooms_to_update = await self.db_pool.runInteraction(
+            "_sliding_sync_joined_rooms_bg_update._get_rooms_to_update_txn",
+            _get_rooms_to_update_txn,
+        )
+
+        if not rooms_to_update:
+            await self.db_pool.updates._end_background_update(
+                _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
+            )
+            return 0
+
+        # Map from room_id to insert/update state values in the `sliding_sync_joined_rooms` table.
+        joined_room_updates: Dict[str, SlidingSyncStateInsertValues] = {}
+        # Map from room_id to stream_ordering/bump_stamp, etc values
+        joined_room_stream_ordering_updates: Dict[
+            str, _JoinedRoomStreamOrderingUpdate
+        ] = {}
+        # As long as we get this value before we fetch the current state, we can use it
+        # to check if something has changed since that point.
+        most_recent_current_state_delta_stream_id = (
+            await self.get_max_stream_id_in_current_state_deltas()
+        )
+        for (room_id,) in rooms_to_update:
+            current_state_ids_map = await self.db_pool.runInteraction(
+                "_sliding_sync_joined_rooms_bg_update._get_relevant_sliding_sync_current_state_event_ids_txn",
+                PersistEventsStore._get_relevant_sliding_sync_current_state_event_ids_txn,
+                room_id,
+            )
+
+            # If we're not joined to the room a) it doesn't belong in the
+            # `sliding_sync_joined_rooms` table so we should skip and b) we won't have
+            # any `current_state_events` for the room.
+            if not current_state_ids_map:
+                continue
+
+            try:
+                fetched_events = await self.get_events(current_state_ids_map.values())
+            except (DatabaseCorruptionError, InvalidEventError) as e:
+                logger.warning(
+                    "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
+                    room_id,
+                    e,
+                )
+                continue
+
+            current_state_map: StateMap[EventBase] = {
+                state_key: fetched_events[event_id]
+                for state_key, event_id in current_state_ids_map.items()
+                # `get_events(...)` will filter out events for unknown room versions
+                if event_id in fetched_events
+            }
+
+            # Even if we are joined to the room, this can happen for unknown room
+            # versions (old room versions that aren't known anymore) since
+            # `get_events(...)` will filter out events for unknown room versions
+            if not current_state_map:
+                continue
+
+            state_insert_values = (
+                PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+                    current_state_map
+                )
+            )
+            # We should have some insert values for each room, even if they are `None`
+            assert state_insert_values
+            joined_room_updates[room_id] = state_insert_values
+
+            # Figure out the stream_ordering of the latest event in the room
+            most_recent_event_pos_results = await self.get_last_event_pos_in_room(
+                room_id, event_types=None
+            )
+            assert most_recent_event_pos_results is not None, (
+                f"We should not be seeing `None` here because the room ({room_id}) should at-least have a create event "
+                + "given we pulled the room out of `current_state_events`"
+            )
+            most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream
+
+            # The `most_recent_event_stream_ordering` should be positive,
+            # however there are (very rare) rooms where that is not the case in
+            # the matrix.org database. It's not clear how they got into that
+            # state, but it does mean that we cannot assert that the stream
+            # ordering is indeed positive.
+
+            # Figure out the latest `bump_stamp` in the room. This could be `None` for a
+            # federated room you just joined where all of events are still `outliers` or
+            # backfilled history. In the Sliding Sync API, we default to the user's
+            # membership event `stream_ordering` if we don't have a `bump_stamp` so
+            # having it as `None` in this table is fine.
+            bump_stamp_event_pos_results = await self.get_last_event_pos_in_room(
+                room_id, event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
+            )
+            most_recent_bump_stamp = None
+            if (
+                bump_stamp_event_pos_results is not None
+                and bump_stamp_event_pos_results[1].stream > 0
+            ):
+                most_recent_bump_stamp = bump_stamp_event_pos_results[1].stream
+
+            joined_room_stream_ordering_updates[room_id] = (
+                _JoinedRoomStreamOrderingUpdate(
+                    most_recent_event_stream_ordering=most_recent_event_stream_ordering,
+                    most_recent_bump_stamp=most_recent_bump_stamp,
+                )
+            )
+
+        def _fill_table_txn(txn: LoggingTransaction) -> None:
+            # Handle updating the `sliding_sync_joined_rooms` table
+            #
+            for (
+                room_id,
+                update_map,
+            ) in joined_room_updates.items():
+                joined_room_stream_ordering_update = (
+                    joined_room_stream_ordering_updates[room_id]
+                )
+                event_stream_ordering = (
+                    joined_room_stream_ordering_update.most_recent_event_stream_ordering
+                )
+                bump_stamp = joined_room_stream_ordering_update.most_recent_bump_stamp
+
+                # Check if the current state has been updated since we gathered it.
+                # We're being careful not to insert/overwrite with stale data.
+                state_deltas_since_we_gathered_current_state = (
+                    self.get_current_state_deltas_for_room_txn(
+                        txn,
+                        room_id,
+                        from_token=RoomStreamToken(
+                            stream=most_recent_current_state_delta_stream_id
+                        ),
+                        to_token=None,
+                    )
+                )
+                for state_delta in state_deltas_since_we_gathered_current_state:
+                    # We only need to check whether the state is relevant to the
+                    # `sliding_sync_joined_rooms` table.
+                    if (
+                        state_delta.event_type,
+                        state_delta.state_key,
+                    ) in SLIDING_SYNC_RELEVANT_STATE_SET:
+                        # Raising exception so we can just exit and try again. It would
+                        # be hard to resolve this within the transaction because we need
+                        # to get full events out that take redactions into account. We
+                        # could add some retry logic here, but it's easier to just let
+                        # the background update try again.
+                        raise Exception(
+                            "Current state was updated after we gathered it to update "
+                            + "`sliding_sync_joined_rooms` in the background update. "
+                            + "Raising exception so we can just try again."
+                        )
+
+                # Since we fully insert rows into `sliding_sync_joined_rooms`, we can
+                # just do everything on insert and `ON CONFLICT DO NOTHING`.
+                #
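+                # (With `values={}` this behaves roughly like `INSERT INTO
+                # sliding_sync_joined_rooms (...) VALUES (...) ON CONFLICT (room_id)
+                # DO NOTHING`; an illustrative sketch of the generated SQL, not the
+                # exact statement.)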
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="sliding_sync_joined_rooms",
+                    keyvalues={"room_id": room_id},
+                    values={},
+                    insertion_values={
+                        **update_map,
+                        # We only *insert* (not *update*) `event_stream_ordering` and
+                        # `bump_stamp` because if they are already present, they are
+                        # already up-to-date.
+                        "event_stream_ordering": event_stream_ordering,
+                        "bump_stamp": bump_stamp,
+                    },
+                )
+
+            # Now that we've processed all the rooms, we can remove them from the
+            # queue.
+            #
+            # Note: we need to remove all the rooms from the queue we pulled out
+            # from the DB, not just the ones we've processed above. Otherwise
+            # we'll simply keep pulling out the same rooms over and over again.
+            self.db_pool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_joined_rooms_to_recalculate",
+                keys=("room_id",),
+                values=rooms_to_update,
+            )
+
+        await self.db_pool.runInteraction(
+            "sliding_sync_joined_rooms_bg_update", _fill_table_txn
+        )
+
+        return len(rooms_to_update)
+
+    async def _sliding_sync_membership_snapshots_bg_update(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """
+        Background update to populate the `sliding_sync_membership_snapshots` table.
+        """
+        # We do this in two phases: a) the initial phase where we go through all
+        # room memberships, and then b) a second phase where we look at new
+        # memberships (this is to handle the case where we downgrade and then
+        # upgrade again).
+        #
+        # We have to do this as two phases (rather than just the second phase
+        # where we iterate on event_stream_ordering), as the
+        # `event_stream_ordering` column may have null values for old rows.
+        # Therefore we first do the set of historic rooms and *then* look at any
+        # new rows (which will have a non-null `event_stream_ordering`).
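+        #
+        # For illustration only (hypothetical values): the progress JSON looks roughly
+        # like `{"initial_phase": True, "last_room_id": "!r:example.com",
+        # "last_user_id": "@u:example.com", "last_event_stream_ordering": 1234}` during
+        # the first phase, and `{"initial_phase": False,
+        # "last_event_stream_ordering": 1234}` once we move to the catch-up phase.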
+        initial_phase = progress.get("initial_phase")
+        if initial_phase is None:
+            # If this is the first run, store the current max stream position.
+            # We know we will go through all memberships less than the current
+            # max in the initial phase.
+            progress = {
+                "initial_phase": True,
+                "last_event_stream_ordering": self.get_room_max_stream_ordering(),
+            }
+            await self.db_pool.updates._background_update_progress(
+                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                progress,
+            )
+            initial_phase = True
+
+        last_room_id = progress.get("last_room_id", "")
+        last_user_id = progress.get("last_user_id", "")
+        last_event_stream_ordering = progress["last_event_stream_ordering"]
+
+        def _find_memberships_to_update_txn(
+            txn: LoggingTransaction,
+        ) -> List[
+            Tuple[
+                str,
+                Optional[str],
+                Optional[str],
+                str,
+                str,
+                str,
+                str,
+                int,
+                Optional[str],
+                bool,
+            ]
+        ]:
+            # Fetch the set of event IDs that we want to update
+            #
+            # We skip over rows which we've already handled, i.e. have a
+            # matching row in `sliding_sync_membership_snapshots` with the same
+            # room, user and event ID.
+            #
+            # We also ignore rooms that the user has left themselves (i.e. not
+            # kicked). This is to avoid having to port lots of old rooms that we
+            # will never send down sliding sync (as we exclude such rooms from
+            # initial syncs).
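+            #
+            # The initial phase pages through memberships with keyset pagination on the
+            # composite `(room_id, user_id)` key, e.g. (hypothetical values)
+            # `WHERE (room_id, user_id) > ('!last:example.com', '@last:example.com')`,
+            # which is why we order by `room_id, user_id` and carry the last-seen pair
+            # forward in the progress JSON.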
+
+            if initial_phase:
+                # There are some old out-of-band memberships (before
+                # https://github.com/matrix-org/synapse/issues/6983) where we don't have
+                # the corresponding room stored in the `rooms` table. We use `LEFT JOIN
+                # rooms AS r ON (c.room_id = r.room_id)` to find the rooms missing from
+                # `rooms` and insert a row for them below.
+                txn.execute(
+                    """
+                    SELECT
+                        c.room_id,
+                        r.room_id,
+                        r.room_version,
+                        c.user_id,
+                        e.sender,
+                        c.event_id,
+                        c.membership,
+                        e.stream_ordering,
+                        e.instance_name,
+                        e.outlier
+                    FROM local_current_membership AS c
+                    LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id)
+                    INNER JOIN events AS e USING (event_id)
+                    LEFT JOIN rooms AS r ON (c.room_id = r.room_id)
+                    WHERE (c.room_id, c.user_id) > (?, ?)
+                        AND (m.user_id IS NULL OR c.event_id != m.membership_event_id)
+                    ORDER BY c.room_id ASC, c.user_id ASC
+                    LIMIT ?
+                    """,
+                    (last_room_id, last_user_id, batch_size),
+                )
+            elif last_event_stream_ordering is not None:
+                # It's important to sort by `event_stream_ordering` *ascending* (oldest to
+                # newest) so that if we see that this background update is in progress and want
+                # to start the catch-up process, we can safely assume that it will
+                # eventually get to the rooms we want to catch-up on anyway (see
+                # `_resolve_stale_data_in_sliding_sync_tables()`).
+                #
+                # We select the same columns (including the `rooms` table join) as the
+                # `initial_phase` query above so the rows have the same shape, even
+                # though these newer events shouldn't have the missing-room problem.
+                txn.execute(
+                    """
+                    SELECT
+                        c.room_id,
+                        r.room_id,
+                        r.room_version,
+                        c.user_id,
+                        e.sender,
+                        c.event_id,
+                        c.membership,
+                        c.event_stream_ordering,
+                        e.instance_name,
+                        e.outlier
+                    FROM local_current_membership AS c
+                    LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id)
+                    INNER JOIN events AS e USING (event_id)
+                    LEFT JOIN rooms AS r ON (c.room_id = r.room_id)
+                    WHERE c.event_stream_ordering > ?
+                        AND (m.user_id IS NULL OR c.event_id != m.membership_event_id)
+                    ORDER BY c.event_stream_ordering ASC
+                    LIMIT ?
+                    """,
+                    (last_event_stream_ordering, batch_size),
+                )
+            else:
+                raise Exception("last_event_stream_ordering should not be None")
+
+            memberships_to_update_rows = cast(
+                List[
+                    Tuple[
+                        str,
+                        Optional[str],
+                        Optional[str],
+                        str,
+                        str,
+                        str,
+                        str,
+                        int,
+                        Optional[str],
+                        bool,
+                    ]
+                ],
+                txn.fetchall(),
+            )
+
+            return memberships_to_update_rows
+
+        memberships_to_update_rows = await self.db_pool.runInteraction(
+            "sliding_sync_membership_snapshots_bg_update._find_memberships_to_update_txn",
+            _find_memberships_to_update_txn,
+        )
+
+        if not memberships_to_update_rows:
+            if initial_phase:
+                # Move onto the next phase.
+                await self.db_pool.updates._background_update_progress(
+                    _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    {
+                        "initial_phase": False,
+                        "last_event_stream_ordering": last_event_stream_ordering,
+                    },
+                )
+                return 0
+            else:
+                # We've finished both phases, we're done.
+                await self.db_pool.updates._end_background_update(
+                    _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE
+                )
+                return 0
+
+        def _find_previous_invite_or_knock_membership_txn(
+            txn: LoggingTransaction, room_id: str, user_id: str, event_id: str
+        ) -> Optional[Tuple[str, str]]:
+            # Find the previous invite/knock event before the leave event
+            #
+            # Here are some notes on how we landed on this query:
+            #
+            # We're using `topological_ordering` instead of `stream_ordering` because
+            # somehow it's possible to have your `leave` event backfilled with a
+            # negative `stream_ordering` and your previous `invite` event with a
+            # positive `stream_ordering` so we wouldn't have a chance of finding the
+            # previous membership with a naive `event_stream_ordering < ?` comparison.
+            #
+            # Also be careful because `room_memberships.event_stream_ordering` is
+            # nullable and not always filled in. You would need to join on `events` to
+            # rely on `events.stream_ordering` instead. Even though
+            # `events.stream_ordering` also doesn't have a `NOT NULL` constraint, there
+            # are no rows where it is actually null (checked on `matrix.org`). The fact
+            # that `events.stream_ordering` is a nullable column is a holdover from a
+            # rename of the column.
+            #
+            # You might also consider using the `event_auth` table to find the previous
+            # membership, but there are cases where somehow a membership event doesn't
+            # point back to the previous membership event in the auth events (unknown
+            # cause).
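+            #
+            # As an illustrative (hypothetical) example of the ordering problem: a
+            # backfilled `leave` could have `stream_ordering = -5` while the earlier
+            # `invite` has `stream_ordering = 100`, so a naive
+            # `event_stream_ordering < ?` comparison would never find the invite,
+            # whereas ordering by `topological_ordering` still does.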
+            txn.execute(
+                """
+                SELECT event_id, membership
+                FROM room_memberships AS m
+                INNER JOIN events AS e USING (room_id, event_id)
+                WHERE
+                    room_id = ?
+                    AND m.user_id = ?
+                    AND (m.membership = ? OR m.membership = ?)
+                    AND e.event_id != ?
+                ORDER BY e.topological_ordering DESC
+                LIMIT 1
+                """,
+                (
+                    room_id,
+                    user_id,
+                    # We look explicitly for `invite` and `knock` events instead of
+                    # just the immediately-previous membership, as someone could have been `invite`
+                    # -> `ban` -> unbanned (`leave`) and we want to find the `invite`
+                    # event where the stripped state is.
+                    Membership.INVITE,
+                    Membership.KNOCK,
+                    event_id,
+                ),
+            )
+            row = txn.fetchone()
+
+            if row is None:
+                # Generally we should have an invite or knock event for leaves
+                # that are outliers, but this may not always be the case
+                # (e.g. a local user got kicked but the kick event got pulled in
+                # as an outlier).
+                return None
+
+            event_id, membership = row
+
+            return event_id, membership
+
+        # Map from (room_id, user_id) to ...
+        to_insert_membership_snapshots: Dict[
+            Tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues
+        ] = {}
+        to_insert_membership_infos: Dict[
+            Tuple[str, str], SlidingSyncMembershipInfoWithEventPos
+        ] = {}
+        for (
+            room_id,
+            room_id_from_rooms_table,
+            room_version_id,
+            user_id,
+            sender,
+            membership_event_id,
+            membership,
+            membership_event_stream_ordering,
+            membership_event_instance_name,
+            is_outlier,
+        ) in memberships_to_update_rows:
+            # We don't know how to handle `membership` values other than these. The
+            # code below would need to be updated.
+            assert membership in (
+                Membership.JOIN,
+                Membership.INVITE,
+                Membership.KNOCK,
+                Membership.LEAVE,
+                Membership.BAN,
+            )
+
+            if (
+                room_version_id is not None
+                and room_version_id not in KNOWN_ROOM_VERSIONS
+            ):
+                # Ignore rooms with unknown room versions (these were
+                # experimental rooms that we no longer support).
+                continue
+
+            # There are some old out-of-band memberships (before
+            # https://github.com/matrix-org/synapse/issues/6983) where we don't have the
+            # corresponding room stored in the `rooms` table. We have a `FOREIGN KEY`
+            # constraint on the `sliding_sync_membership_snapshots` table so we have to
+            # fix-up these memberships by adding the room to the `rooms` table.
+            if room_id_from_rooms_table is None:
+                await self.db_pool.simple_insert(
+                    table="rooms",
+                    values={
+                        "room_id": room_id,
+                        # Only out-of-band memberships are missing from the `rooms`
+                        # table, so that is the only type of membership we're dealing
+                        # with here. Since we don't calculate the "chain cover" for
+                        # out-of-band memberships, we can just set this to `True`; if
+                        # the user ever joins the room, we will end up calculating the
+                        # "chain cover" anyway.
+                        "has_auth_chain_index": True,
+                    },
+                )
+
+            # Map of values to insert/update in the `sliding_sync_membership_snapshots` table
+            sliding_sync_membership_snapshots_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {}
+            if membership == Membership.JOIN:
+                # If we're still joined, we can pull from current state.
+                current_state_ids_map: StateMap[
+                    str
+                ] = await self.hs.get_storage_controllers().state.get_current_state_ids(
+                    room_id,
+                    state_filter=StateFilter.from_types(
+                        SLIDING_SYNC_RELEVANT_STATE_SET
+                    ),
+                    # Partially-stated rooms should have all state events except for
+                    # remote membership events so we don't need to wait at all because
+                    # we only want some non-membership state
+                    await_full_state=False,
+                )
+                # We're iterating over rooms that we are joined to so they should
+                # have `current_state_events` and we should have some current state
+                # for each room
+                if current_state_ids_map:
+                    try:
+                        fetched_events = await self.get_events(
+                            current_state_ids_map.values()
+                        )
+                    except (DatabaseCorruptionError, InvalidEventError) as e:
+                        logger.warning(
+                            "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
+                            room_id,
+                            e,
+                        )
+                        continue
+
+                    current_state_map: StateMap[EventBase] = {
+                        state_key: fetched_events[event_id]
+                        for state_key, event_id in current_state_ids_map.items()
+                        # `get_events(...)` will filter out events for unknown room versions
+                        if event_id in fetched_events
+                    }
+
+                    # Can happen for unknown room versions (old room versions that aren't known
+                    # anymore) since `get_events(...)` will filter out events for unknown room
+                    # versions
+                    if not current_state_map:
+                        continue
+
+                    state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+                        current_state_map
+                    )
+                    sliding_sync_membership_snapshots_insert_map.update(
+                        state_insert_values
+                    )
+                    # We should have some insert values for each room, even if they are `None`
+                    assert sliding_sync_membership_snapshots_insert_map
+
+                    # We have current state to work from
+                    sliding_sync_membership_snapshots_insert_map["has_known_state"] = (
+                        True
+                    )
+                else:
+                    # Although we expect every room to have a create event (even
+                    # for old, unknown room versions, since we've never supported a
+                    # version without one), there seem to be some corrupted rooms in
+                    # practice that don't have the create event in the
+                    # `current_state_events` table. The create event does exist
+                    # in the events table though. We'll just say that we don't
+                    # know the state for these rooms and continue on with our
+                    # day.
+                    sliding_sync_membership_snapshots_insert_map = {
+                        "has_known_state": False,
+                        "room_type": None,
+                        "room_name": None,
+                        "is_encrypted": False,
+                    }
+            elif membership in (Membership.INVITE, Membership.KNOCK) or (
+                membership in (Membership.LEAVE, Membership.BAN) and is_outlier
+            ):
+                invite_or_knock_event_id = None
+                invite_or_knock_membership = None
+
+                # If the event is an `out_of_band_membership` (special case of
+                # `outlier`), we never had historical state so we have to pull from
+                # the stripped state on the previous invite/knock event. This gives
+                # us a consistent view of the room state regardless of your
+                # membership (i.e. the room shouldn't disappear if you're using the
+                # `is_encrypted` filter and you leave).
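+                #
+                # Stripped state is just a list of minimal event dicts stashed in the
+                # `unsigned` section of the invite/knock event, each roughly of the form
+                # `{"type": "m.room.name", "state_key": "", "sender": ..., "content": {...}}`
+                # (illustrative shape only).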
+                if membership in (Membership.LEAVE, Membership.BAN) and is_outlier:
+                    previous_membership = await self.db_pool.runInteraction(
+                        "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn",
+                        _find_previous_invite_or_knock_membership_txn,
+                        room_id,
+                        user_id,
+                        membership_event_id,
+                    )
+                    if previous_membership is not None:
+                        (
+                            invite_or_knock_event_id,
+                            invite_or_knock_membership,
+                        ) = previous_membership
+                else:
+                    invite_or_knock_event_id = membership_event_id
+                    invite_or_knock_membership = membership
+
+                if (
+                    invite_or_knock_event_id is not None
+                    and invite_or_knock_membership is not None
+                ):
+                    # Pull from the stripped state on the invite/knock event
+                    invite_or_knock_event = await self.get_event(
+                        invite_or_knock_event_id
+                    )
+
+                    raw_stripped_state_events = None
+                    if invite_or_knock_membership == Membership.INVITE:
+                        invite_room_state = invite_or_knock_event.unsigned.get(
+                            "invite_room_state"
+                        )
+                        raw_stripped_state_events = invite_room_state
+                    elif invite_or_knock_membership == Membership.KNOCK:
+                        knock_room_state = invite_or_knock_event.unsigned.get(
+                            "knock_room_state"
+                        )
+                        raw_stripped_state_events = knock_room_state
+
+                    sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state(
+                        raw_stripped_state_events
+                    )
+                else:
+                    # We couldn't find any state for the membership, so we just have to
+                    # leave it as empty.
+                    sliding_sync_membership_snapshots_insert_map = {
+                        "has_known_state": False,
+                        "room_type": None,
+                        "room_name": None,
+                        "is_encrypted": False,
+                    }
+
+                # We should have some insert values for each room, even if no
+                # stripped state is on the event because we still want to record
+                # that we have no known state
+                assert sliding_sync_membership_snapshots_insert_map
+            elif membership in (Membership.LEAVE, Membership.BAN):
+                # Pull from historical state
+                state_ids_map = await self.hs.get_storage_controllers().state.get_state_ids_for_event(
+                    membership_event_id,
+                    state_filter=StateFilter.from_types(
+                        SLIDING_SYNC_RELEVANT_STATE_SET
+                    ),
+                    # Partially-stated rooms should have all state events except for
+                    # remote membership events so we don't need to wait at all because
+                    # we only want some non-membership state
+                    await_full_state=False,
+                )
+
+                try:
+                    fetched_events = await self.get_events(state_ids_map.values())
+                except (DatabaseCorruptionError, InvalidEventError) as e:
+                    logger.warning(
+                        "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
+                        room_id,
+                        e,
+                    )
+                    continue
+
+                state_map: StateMap[EventBase] = {
+                    state_key: fetched_events[event_id]
+                    for state_key, event_id in state_ids_map.items()
+                    # `get_events(...)` will filter out events for unknown room versions
+                    if event_id in fetched_events
+                }
+
+                # Can happen for unknown room versions (old room versions that aren't known
+                # anymore) since `get_events(...)` will filter out events for unknown room
+                # versions
+                if not state_map:
+                    continue
+
+                state_insert_values = (
+                    PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+                        state_map
+                    )
+                )
+                sliding_sync_membership_snapshots_insert_map.update(state_insert_values)
+                # We should have some insert values for each room, even if they are `None`
+                assert sliding_sync_membership_snapshots_insert_map
+
+                # We have historical state to work from
+                sliding_sync_membership_snapshots_insert_map["has_known_state"] = True
+            else:
+                # We don't know how to handle this type of membership yet
+                #
+                # FIXME: We should use `assert_never` here but for some reason
+                # the exhaustive matching doesn't recognize the `Never` here.
+                # assert_never(membership)
+                raise AssertionError(
+                    f"Unexpected membership {membership} ({membership_event_id}) that we don't know how to handle yet"
+                )
+
+            to_insert_membership_snapshots[(room_id, user_id)] = (
+                sliding_sync_membership_snapshots_insert_map
+            )
+            to_insert_membership_infos[(room_id, user_id)] = (
+                SlidingSyncMembershipInfoWithEventPos(
+                    user_id=user_id,
+                    sender=sender,
+                    membership_event_id=membership_event_id,
+                    membership=membership,
+                    membership_event_stream_ordering=membership_event_stream_ordering,
+                    # If instance_name is null we default to "master"
+                    membership_event_instance_name=membership_event_instance_name
+                    or "master",
+                )
+            )
+
+        def _fill_table_txn(txn: LoggingTransaction) -> None:
+            # Handle updating the `sliding_sync_membership_snapshots` table
+            #
+            for key, insert_map in to_insert_membership_snapshots.items():
+                room_id, user_id = key
+                membership_info = to_insert_membership_infos[key]
+                sender = membership_info.sender
+                membership_event_id = membership_info.membership_event_id
+                membership = membership_info.membership
+                membership_event_stream_ordering = (
+                    membership_info.membership_event_stream_ordering
+                )
+                membership_event_instance_name = (
+                    membership_info.membership_event_instance_name
+                )
+
+                # We don't need to upsert the state because we never partially
+                # insert/update the snapshots, and anything already there is up-to-date.
+                # The one exception is the `forgotten` field, which is updated
+                # out-of-band from the membership changes.
+                #
+                # Even though we're only doing insertions, we're using
+                # `simple_upsert_txn()` here to avoid unique violation errors that would
+                # happen from `simple_insert_txn()`.
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="sliding_sync_membership_snapshots",
+                    keyvalues={"room_id": room_id, "user_id": user_id},
+                    values={},
+                    insertion_values={
+                        **insert_map,
+                        "sender": sender,
+                        "membership_event_id": membership_event_id,
+                        "membership": membership,
+                        "event_stream_ordering": membership_event_stream_ordering,
+                        "event_instance_name": membership_event_instance_name,
+                    },
+                )
+                # We need to find the `forgotten` value during the transaction because
+                # we can't risk inserting stale data.
+                if isinstance(txn.database_engine, PostgresEngine):
+                    txn.execute(
+                        """
+                        UPDATE sliding_sync_membership_snapshots
+                        SET
+                            forgotten = m.forgotten
+                        FROM room_memberships AS m
+                        WHERE sliding_sync_membership_snapshots.room_id = ?
+                            AND sliding_sync_membership_snapshots.user_id = ?
+                            AND membership_event_id = ?
+                            AND membership_event_id = m.event_id
+                            AND m.event_id IS NOT NULL
+                        """,
+                        (
+                            room_id,
+                            user_id,
+                            membership_event_id,
+                        ),
+                    )
+                else:
+                    # SQLite doesn't support UPDATE FROM before 3.33.0, so we do
+                    # this via sub-selects.
+                    txn.execute(
+                        """
+                        UPDATE sliding_sync_membership_snapshots
+                        SET
+                            forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?)
+                        WHERE room_id = ? and user_id = ? AND membership_event_id = ?
+                        """,
+                        (
+                            membership_event_id,
+                            room_id,
+                            user_id,
+                            membership_event_id,
+                        ),
+                    )
+
+        await self.db_pool.runInteraction(
+            "sliding_sync_membership_snapshots_bg_update", _fill_table_txn
+        )
+
+        # Update the progress
+        (
+            room_id,
+            _room_id_from_rooms_table,
+            _room_version_id,
+            user_id,
+            _sender,
+            _membership_event_id,
+            _membership,
+            membership_event_stream_ordering,
+            _membership_event_instance_name,
+            _is_outlier,
+        ) = memberships_to_update_rows[-1]
+
+        progress = {
+            "initial_phase": initial_phase,
+            "last_room_id": room_id,
+            "last_user_id": user_id,
+            "last_event_stream_ordering": last_event_stream_ordering,
+        }
+        if not initial_phase:
+            progress["last_event_stream_ordering"] = membership_event_stream_ordering
+
+        await self.db_pool.updates._background_update_progress(
+            _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+            progress,
+        )
+
+        return len(memberships_to_update_rows)
+
+
+def _resolve_stale_data_in_sliding_sync_tables(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+
+    This accounts for when someone downgrades their Synapse version and then upgrades it
+    again. This will ensure that we don't have any stale/out-of-date data in the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables since any new
+    events sent in rooms would have also needed to be written to the sliding sync
+    tables. For example, a new event needs to bump `event_stream_ordering` in the
+    `sliding_sync_joined_rooms` table, as does any state change in the room (like the
+    room name changing). Similarly, someone's membership changing in a room needs to be
+    reflected in `sliding_sync_membership_snapshots`.
+
+    This way, if a row exists in the sliding sync tables, we are able to rely on it
+    (accurate data). And if a row doesn't exist, we use a fallback to get the same info
+    until the background updates fill in the rows or a new event comes in triggering it
+    to be fully inserted.
+
+    FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    foreground update for
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    https://github.com/element-hq/synapse/issues/17623)
+    """
+
+    _resolve_stale_data_in_sliding_sync_joined_rooms_table(txn)
+    _resolve_stale_data_in_sliding_sync_membership_snapshots_table(txn)
+
+
+def _resolve_stale_data_in_sliding_sync_joined_rooms_table(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the `sliding_sync_joined_rooms` table and
+    kicks off the background update to catch up with what we missed while Synapse was
+    downgraded.
+
+    See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+    context.
+    """
+
+    # Find the point when we stopped writing to the `sliding_sync_joined_rooms` table
+    txn.execute(
+        """
+        SELECT event_stream_ordering
+        FROM sliding_sync_joined_rooms
+        ORDER BY event_stream_ordering DESC
+        LIMIT 1
+        """,
+    )
+
+    # If we have nothing written to the `sliding_sync_joined_rooms` table, there is
+    # nothing to clean up
+    row = cast(Optional[Tuple[int]], txn.fetchone())
+    max_stream_ordering_sliding_sync_joined_rooms_table = None
+    depends_on = None
+    if row is not None:
+        (max_stream_ordering_sliding_sync_joined_rooms_table,) = row
+
+        txn.execute(
+            """
+            SELECT room_id
+            FROM events
+            WHERE stream_ordering > ?
+            GROUP BY room_id
+            ORDER BY MAX(stream_ordering) ASC
+            """,
+            (max_stream_ordering_sliding_sync_joined_rooms_table,),
+        )
+
+        room_rows = txn.fetchall()
+        # No new events have been written to the `events` table since the last time we wrote
+        # to the `sliding_sync_joined_rooms` table so there is nothing to clean up. This is
+        # the expected normal scenario for people who have not downgraded their Synapse
+        # version.
+        if not room_rows:
+            return
+
+        # 1000 is an arbitrary batch size with no testing
+        for chunk in batch_iter(room_rows, 1000):
+            # Handle updating the `sliding_sync_joined_rooms` table
+            #
+            # Clear out the stale data
+            DatabasePool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_joined_rooms",
+                keys=("room_id",),
+                values=chunk,
+            )
+
+            # Update the `sliding_sync_joined_rooms_to_recalculate` table with the rooms
+            # that went stale and now need to be recalculated.
+            DatabasePool.simple_upsert_many_txn_native_upsert(
+                txn,
+                table="sliding_sync_joined_rooms_to_recalculate",
+                key_names=("room_id",),
+                key_values=chunk,
+                value_names=(),
+                # No value columns, therefore make a blank list so that the following
+                # zip() works correctly.
+                value_values=[() for x in range(len(chunk))],
+            )
+    else:
+        # Avoid adding the background updates when there is no data to run them on (if
+        # the homeserver has no rooms). The portdb script refuses to run with pending
+        # background updates, and since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+        # Re-run the `sliding_sync_joined_rooms_to_recalculate` prefill if there is
+        # nothing in the `sliding_sync_joined_rooms` table
+        DatabasePool.simple_upsert_txn_native_upsert(
+            txn,
+            table="background_updates",
+            keyvalues={
+                "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+            },
+            values={},
+            # Only insert the row if it doesn't already exist. If it already exists,
+            # we're already working on it
+            insertion_values={
+                "progress_json": "{}",
+            },
+        )
+        depends_on = _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_joined_rooms` table yet. This could happen if someone had zero rooms
+    # on their server (so the normal background update completes), downgraded Synapse,
+    # joined and created some new rooms, and then upgraded again.
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            # Empty progress is expected since it's not used for this background update.
+            "progress_json": "{}",
+            # Wait for the prefill to finish
+            "depends_on": depends_on,
+        },
+    )
+
+
+def _resolve_stale_data_in_sliding_sync_membership_snapshots_table(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the `sliding_sync_membership_snapshots` table
+    and kicks off the background update to catch up with what we missed while Synapse
+    was downgraded.
+
+    See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+    context.
+    """
+
+    # Find the point when we stopped writing to the `sliding_sync_membership_snapshots` table
+    txn.execute(
+        """
+        SELECT event_stream_ordering
+        FROM sliding_sync_membership_snapshots
+        ORDER BY event_stream_ordering DESC
+        LIMIT 1
+        """,
+    )
+
+    # If we have nothing written to the `sliding_sync_membership_snapshots` table,
+    # there is nothing to clean up
+    row = cast(Optional[Tuple[int]], txn.fetchone())
+    max_stream_ordering_sliding_sync_membership_snapshots_table = None
+    if row is not None:
+        (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row
+
+        # XXX: Since `forgotten` is simply a flag on the `room_memberships` table that is
+        # set out-of-band, there is no way to tell whether it was set while Synapse was
+        # downgraded. The only thing the user can do is `/forget` again if they run into
+        # this.
+        #
+        # This only picks up changes to memberships.
+        txn.execute(
+            """
+            SELECT user_id, room_id
+            FROM local_current_membership
+            WHERE event_stream_ordering > ?
+            ORDER BY event_stream_ordering ASC
+            """,
+            (max_stream_ordering_sliding_sync_membership_snapshots_table,),
+        )
+
+        membership_rows = txn.fetchall()
+        # No membership changes have been written since the last time we wrote to the
+        # `sliding_sync_membership_snapshots` table so there is nothing to clean up.
+        # This is the expected normal scenario for people who have not downgraded their
+        # Synapse version.
+        if not membership_rows:
+            return
+
+        # 1000 is an arbitrary batch size with no testing
+        for chunk in batch_iter(membership_rows, 1000):
+            # Handle updating the `sliding_sync_membership_snapshots` table
+            #
+            DatabasePool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_membership_snapshots",
+                keys=("user_id", "room_id"),
+                values=chunk,
+            )
+    else:
+        # Avoid adding the background updates when there is no data to run them on (if
+        # the homeserver has no rooms). The portdb script refuses to run with pending
+        # background updates, and since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_membership_snapshots` table yet. This could happen if someone had
+    # zero rooms on their server (so the normal background update completes), downgraded
+    # Synapse, joined and created some new rooms, and then upgraded again.
+    #
+    progress_json: JsonDict = {}
+    if max_stream_ordering_sliding_sync_membership_snapshots_table is not None:
+        progress_json["initial_phase"] = False
+        progress_json["last_event_stream_ordering"] = (
+            max_stream_ordering_sliding_sync_membership_snapshots_table
+        )
+
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            "progress_json": json_encoder.encode(progress_json),
+        },
+    )
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index a5acea8c3b..403407068c 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -61,7 +61,13 @@ from synapse.logging.context import (
     current_context,
     make_deferred_yieldable,
 )
-from synapse.logging.opentracing import start_active_span, tag_args, trace
+from synapse.logging.opentracing import (
+    SynapseTags,
+    set_tag,
+    start_active_span,
+    tag_args,
+    trace,
+)
 from synapse.metrics.background_process_metrics import (
     run_as_background_process,
     wrap_as_background_process,
@@ -83,6 +89,7 @@ from synapse.storage.util.id_generators import (
 from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import JsonDict, get_domain_from_id
 from synapse.types.state import StateFilter
+from synapse.types.storage import _BackgroundUpdates
 from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
 from synapse.util.caches.descriptors import cached, cachedList
@@ -98,6 +105,26 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+class DatabaseCorruptionError(RuntimeError):
+    """We found an event in the DB that has a persisted event ID that doesn't
+    match its computed event ID."""
+
+    def __init__(
+        self, room_id: str, persisted_event_id: str, computed_event_id: str
+    ) -> None:
+        self.room_id = room_id
+        self.persisted_event_id = persisted_event_id
+        self.computed_event_id = computed_event_id
+
+        message = (
+            f"Database corruption: Event {persisted_event_id} in room {room_id} "
+            f"from the database appears to have been modified (calculated "
+            f"event id {computed_event_id})"
+        )
+
+        super().__init__(message)
+
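+# Callers that can tolerate corrupted events (e.g. the sliding sync background
+# updates) are expected to catch this and skip the affected room, roughly:
+#
+#     try:
+#         events = await store.get_events(event_ids)
+#     except DatabaseCorruptionError as e:
+#         logger.warning("Skipping room %s: %s", e.room_id, e)
+#
+# (An illustrative sketch only; `store` and `event_ids` are hypothetical names.)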
+
 # These values are used in the `enqueue_event` and `_fetch_loop` methods to
 # control how we batch/bulk fetch events from the database.
 # The values are plucked out of thing air to make initial sync run faster
@@ -457,6 +484,8 @@ class EventsWorkerStore(SQLBaseStore):
     ) -> Optional[EventBase]:
         """Get an event from the database by event_id.
 
+        Events for unknown room versions will also be filtered out.
+
         Args:
             event_id: The event_id of the event to fetch
 
@@ -502,6 +531,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         return event
 
+    @trace
     async def get_events(
         self,
         event_ids: Collection[str],
@@ -511,6 +541,10 @@ class EventsWorkerStore(SQLBaseStore):
     ) -> Dict[str, EventBase]:
         """Get events from the database
 
+        Unknown events will be omitted from the response.
+
+        Events for unknown room versions will also be filtered out.
+
         Args:
             event_ids: The event_ids of the events to fetch
 
@@ -529,6 +563,11 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             A mapping from event_id to event.
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
+
         events = await self.get_events_as_list(
             event_ids,
             redact_behaviour=redact_behaviour,
@@ -553,6 +592,8 @@ class EventsWorkerStore(SQLBaseStore):
 
         Unknown events will be omitted from the response.
 
+        Events for unknown room versions will also be filtered out.
+
         Args:
             event_ids: The event_ids of the events to fetch
 
@@ -574,6 +615,10 @@ class EventsWorkerStore(SQLBaseStore):
             Note that the returned list may be smaller than the list of event
             IDs if not all events could be fetched.
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
 
         if not event_ids:
             return []
@@ -694,10 +739,11 @@ class EventsWorkerStore(SQLBaseStore):
 
         return events
 
+    @trace
     @cancellable
     async def get_unredacted_events_from_cache_or_db(
         self,
-        event_ids: Iterable[str],
+        event_ids: Collection[str],
         allow_rejected: bool = False,
     ) -> Dict[str, EventCacheEntry]:
         """Fetch a bunch of events from the cache or the database.
@@ -719,6 +765,11 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             map from event id to result
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
+
         # Shortcut: check if we have any events in the *in memory* cache - this function
         # may be called repeatedly for the same event so at this point we cannot reach
         # out to any external cache for performance reasons. The external cache is
@@ -907,7 +958,7 @@ class EventsWorkerStore(SQLBaseStore):
             events, update_metrics=update_metrics
         )
 
-        missing_event_ids = (e for e in events if e not in event_map)
+        missing_event_ids = [e for e in events if e not in event_map]
         event_map.update(
             await self._get_events_from_external_cache(
                 events=missing_event_ids,
@@ -917,8 +968,9 @@ class EventsWorkerStore(SQLBaseStore):
 
         return event_map
 
+    @trace
     async def _get_events_from_external_cache(
-        self, events: Iterable[str], update_metrics: bool = True
+        self, events: Collection[str], update_metrics: bool = True
     ) -> Dict[str, EventCacheEntry]:
         """Fetch events from any configured external cache.
 
@@ -928,6 +980,10 @@ class EventsWorkerStore(SQLBaseStore):
             events: list of event_ids to fetch
             update_metrics: Whether to update the cache hit ratio metrics
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "events.length",
+            str(len(events)),
+        )
         event_map = {}
 
         for event_id in events:
@@ -1193,6 +1249,7 @@ class EventsWorkerStore(SQLBaseStore):
                 with PreserveLoggingContext():
                     self.hs.get_reactor().callFromThread(fire_errback, e)
 
+    @trace
     async def _get_events_from_db(
         self, event_ids: Collection[str]
     ) -> Dict[str, EventCacheEntry]:
@@ -1211,6 +1268,11 @@ class EventsWorkerStore(SQLBaseStore):
             map from event id to result. May return extra events which
             weren't asked for.
         """
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
+
         fetched_event_ids: Set[str] = set()
         fetched_events: Dict[str, _EventRow] = {}
 
@@ -1356,10 +1418,8 @@ class EventsWorkerStore(SQLBaseStore):
             if original_ev.event_id != event_id:
                 # it's difficult to see what to do here. Pretty much all bets are off
                 # if Synapse cannot rely on the consistency of its database.
-                raise RuntimeError(
-                    f"Database corruption: Event {event_id} in room {d['room_id']} "
-                    f"from the database appears to have been modified (calculated "
-                    f"event id {original_ev.event_id})"
+                raise DatabaseCorruptionError(
+                    d["room_id"], event_id, original_ev.event_id
                 )
 
             event_map[event_id] = original_ev
@@ -1457,7 +1517,8 @@ class EventsWorkerStore(SQLBaseStore):
                 event_dict[event_id] = _EventRow(
                     event_id=event_id,
                     stream_ordering=row[1],
-                    instance_name=row[2],
+                    # If instance_name is null we default to "master"
+                    instance_name=row[2] or "master",
                     internal_metadata=row[3],
                     json=row[4],
                     format_version=row[5],
@@ -1638,7 +1699,7 @@ class EventsWorkerStore(SQLBaseStore):
                 txn.database_engine, "e.event_id", event_ids
             )
             txn.execute(sql + clause, args)
-            found_events = {eid for eid, in txn}
+            found_events = {eid for (eid,) in txn}
 
             # ... and then we can update the results for each key
             return {eid: (eid in found_events) for eid in event_ids}
@@ -1837,9 +1898,9 @@ class EventsWorkerStore(SQLBaseStore):
                 " LIMIT ?"
             )
             txn.execute(sql, (-last_id, -current_id, instance_name, limit))
-            new_event_updates: List[Tuple[int, Tuple[str, str, str, str, str, str]]] = (
-                []
-            )
+            new_event_updates: List[
+                Tuple[int, Tuple[str, str, str, str, str, str]]
+            ] = []
             row: Tuple[int, str, str, str, str, str, str]
             # Type safety: iterating over `txn` yields `Tuple`, i.e.
             # `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a
@@ -2438,3 +2499,84 @@ class EventsWorkerStore(SQLBaseStore):
         )
 
         self.invalidate_get_event_cache_after_txn(txn, event_id)
+
+    async def get_events_sent_by_user_in_room(
+        self, user_id: str, room_id: str, limit: int, filter: Optional[List[str]] = None
+    ) -> Optional[List[str]]:
+        """
+        Get a list of event ids of events sent by the user in the specified room
+
+        Args:
+            user_id: user ID to search against
+            room_id: room ID of the room to search for events in
+            limit: maximum number of event ids to return
+            filter: type of events to filter for
+        """
+
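+        # Example usage (hypothetical values):
+        #
+        #     event_ids = await store.get_events_sent_by_user_in_room(
+        #         "@alice:example.com", "!room:example.com", limit=50,
+        #         filter=["m.room.message"],
+        #     )
+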
+        def _get_events_by_user_in_room_txn(
+            txn: LoggingTransaction,
+            user_id: str,
+            room_id: str,
+            filter: Optional[List[str]],
+            batch_size: int,
+            offset: int,
+        ) -> Tuple[Optional[List[str]], int]:
+            if filter:
+                base_clause, args = make_in_list_sql_clause(
+                    txn.database_engine, "type", filter
+                )
+                clause = f"AND {base_clause}"
+                parameters = (user_id, room_id, *args, batch_size, offset)
+            else:
+                clause = ""
+                parameters = (user_id, room_id, batch_size, offset)
+
+            sql = f"""
+                    SELECT event_id FROM events
+                    WHERE sender = ? AND room_id = ?
+                    {clause}
+                    ORDER BY received_ts DESC
+                    LIMIT ?
+                    OFFSET ?
+                  """
+            txn.execute(sql, parameters)
+            res = txn.fetchall()
+            if res:
+                events = [row[0] for row in res]
+            else:
+                events = None
+
+            return events, offset + batch_size
+
+        offset = 0
+        batch_size = 100
+        if batch_size > limit:
+            batch_size = limit
+
+        selected_ids: List[str] = []
+        while offset < limit:
+            res, offset = await self.db_pool.runInteraction(
+                "get_events_by_user",
+                _get_events_by_user_in_room_txn,
+                user_id,
+                room_id,
+                filter,
+                batch_size,
+                offset,
+            )
+            if res:
+                selected_ids = selected_ids + res
+            else:
+                break
+        return selected_ids
+
+    async def have_finished_sliding_sync_background_jobs(self) -> bool:
+        """Return if it's safe to use the sliding sync membership tables."""
+
+        return await self.db_pool.updates.have_completed_background_updates(
+            (
+                _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+            )
+        )
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 6128332af8..7617fd3ad4 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -64,6 +64,7 @@ class LocalMedia:
     quarantined_by: Optional[str]
     safe_from_quarantine: bool
     user_id: Optional[str]
+    authenticated: Optional[bool]
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -77,6 +78,7 @@ class RemoteMedia:
     created_ts: int
     last_access_ts: int
     quarantined_by: Optional[str]
+    authenticated: Optional[bool]
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -218,6 +220,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "last_access_ts",
                 "safe_from_quarantine",
                 "user_id",
+                "authenticated",
             ),
             allow_none=True,
             desc="get_local_media",
@@ -235,6 +238,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             last_access_ts=row[6],
             safe_from_quarantine=row[7],
             user_id=row[8],
+            authenticated=row[9],
         )
 
     async def get_local_media_by_user_paginate(
@@ -290,7 +294,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                     last_access_ts,
                     quarantined_by,
                     safe_from_quarantine,
-                    user_id
+                    user_id,
+                    authenticated
                 FROM local_media_repository
                 WHERE user_id = ?
                 ORDER BY {order_by_column} {order}, media_id ASC
@@ -314,6 +319,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                     quarantined_by=row[7],
                     safe_from_quarantine=bool(row[8]),
                     user_id=row[9],
+                    authenticated=row[10],
                 )
                 for row in txn
             ]
@@ -417,12 +423,18 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         time_now_ms: int,
         user_id: UserID,
     ) -> None:
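+        # Newly stored media is flagged as authenticated whenever
+        # `enable_authenticated_media` is set in the config; existing rows are
+        # left untouched.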
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         await self.db_pool.simple_insert(
             "local_media_repository",
             {
                 "media_id": media_id,
                 "created_ts": time_now_ms,
                 "user_id": user_id.to_string(),
+                "authenticated": authenticated,
             },
             desc="store_local_media_id",
         )
@@ -438,6 +450,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         user_id: UserID,
         url_cache: Optional[str] = None,
     ) -> None:
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         await self.db_pool.simple_insert(
             "local_media_repository",
             {
@@ -448,6 +465,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "media_length": media_length,
                 "user_id": user_id.to_string(),
                 "url_cache": url_cache,
+                "authenticated": authenticated,
             },
             desc="store_local_media",
         )
@@ -638,6 +656,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "filesystem_id",
                 "last_access_ts",
                 "quarantined_by",
+                "authenticated",
             ),
             allow_none=True,
             desc="get_cached_remote_media",
@@ -654,6 +673,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             filesystem_id=row[4],
             last_access_ts=row[5],
             quarantined_by=row[6],
+            authenticated=row[7],
         )
 
     async def store_cached_remote_media(
@@ -666,6 +686,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         upload_name: Optional[str],
         filesystem_id: str,
     ) -> None:
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         await self.db_pool.simple_insert(
             "remote_media_cache",
             {
@@ -677,6 +702,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "upload_name": upload_name,
                 "filesystem_id": filesystem_id,
                 "last_access_ts": time_now_ms,
+                "authenticated": authenticated,
             },
             desc="store_cached_remote_media",
         )
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 996aea808d..41cf08211f 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -144,6 +144,16 @@ class ProfileWorkerStore(SQLBaseStore):
         return 50
 
     async def get_profileinfo(self, user_id: UserID) -> ProfileInfo:
+        """
+        Fetch the display name and avatar URL of a user.
+
+        Args:
+            user_id: The user ID to fetch the profile for.
+
+        Returns:
+            The user's display name and avatar URL. Values may be null if unset
+             or if the user doesn't exist.
+        """
         profile = await self.db_pool.simple_select_one(
             table="profiles",
             keyvalues={"full_user_id": user_id.to_string()},
@@ -158,6 +168,15 @@ class ProfileWorkerStore(SQLBaseStore):
         return ProfileInfo(avatar_url=profile[1], display_name=profile[0])
 
     async def get_profile_displayname(self, user_id: UserID) -> Optional[str]:
+        """
+        Fetch the display name of a user.
+
+        Args:
+            user_id: The user to get the display name for.
+
+        Raises:
+            404 if the user does not exist.
+        """
         return await self.db_pool.simple_select_one_onecol(
             table="profiles",
             keyvalues={"full_user_id": user_id.to_string()},
@@ -166,6 +185,15 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]:
+        """
+        Fetch the avatar URL of a user.
+
+        Args:
+            user_id: The user to get the avatar URL for.
+
+        Raises:
+            404 if the user does not exist.
+        """
         return await self.db_pool.simple_select_one_onecol(
             table="profiles",
             keyvalues={"full_user_id": user_id.to_string()},
@@ -174,6 +202,12 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     async def create_profile(self, user_id: UserID) -> None:
+        """
+        Create a blank profile for a user.
+
+        Args:
+            user_id: The user to create the profile for.
+        """
         user_localpart = user_id.localpart
         await self.db_pool.simple_insert(
             table="profiles",
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 3b81ed943c..08244153a3 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -201,7 +201,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         txn.execute_batch(
             "INSERT INTO event_backward_extremities (room_id, event_id)"
             " VALUES (?, ?)",
-            [(room_id, event_id) for event_id, in new_backwards_extrems],
+            [(room_id, event_id) for (event_id,) in new_backwards_extrems],
         )
 
         logger.info("[purge] finding state groups referenced by deleted events")
@@ -215,7 +215,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         """
         )
 
-        referenced_state_groups = {sg for sg, in txn}
+        referenced_state_groups = {sg for (sg,) in txn}
         logger.info(
             "[purge] found %i referenced state groups", len(referenced_state_groups)
         )
@@ -454,6 +454,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             # so must be deleted first.
             "local_current_membership",
             "room_memberships",
+            # Note: the sliding_sync_ tables have foreign keys to the `events` table
+            # so must be deleted first.
+            "sliding_sync_joined_rooms",
+            "sliding_sync_membership_snapshots",
             "events",
             "federation_inbound_events_staging",
             "receipts_graph",
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index bbdde17711..86c87f78bf 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -109,6 +109,7 @@ def _load_rules(
         msc3664_enabled=experimental_config.msc3664_enabled,
         msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
         msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events,
+        msc4210_enabled=experimental_config.msc4210_enabled,
     )
 
     return filtered_rules
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 3bde0ae0d4..9964331510 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -30,10 +30,12 @@ from typing import (
     Mapping,
     Optional,
     Sequence,
+    Set,
     Tuple,
     cast,
 )
 
+import attr
 from immutabledict import immutabledict
 
 from synapse.api.constants import EduTypes
@@ -43,6 +45,7 @@ from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_tuple_in_list_sql_clause,
 )
 from synapse.storage.engines._base import IsolationLevel
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
@@ -51,10 +54,12 @@ from synapse.types import (
     JsonMapping,
     MultiWriterStreamToken,
     PersistedPosition,
+    StrCollection,
 )
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -62,6 +67,57 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class ReceiptInRoom:
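+    """A single receipt for a user in a room."""
+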
+    receipt_type: str
+    user_id: str
+    event_id: str
+    thread_id: Optional[str]
+    data: JsonMapping
+
+    @staticmethod
+    def merge_to_content(receipts: Collection["ReceiptInRoom"]) -> JsonMapping:
+        """Merge the given set of receipts (in a room) into the receipt
+        content format.
+
+        Returns:
+            A mapping of the combined receipts: event ID -> receipt type -> user
+            ID -> receipt data.
+        """
+        # MSC4102: always replace threaded receipts with unthreaded ones if
+        # there is a clash. This means we will drop some receipts, but MSC4102
+        # is designed to drop semantically meaningless receipts, so this is
+        # okay. Previously, we would drop meaningful data!
+        #
+        # We do this by finding the unthreaded receipts, and then filtering out
+        # matching threaded receipts.
+
+        # Set of (user_id, event_id)
+        unthreaded_receipts: Set[Tuple[str, str]] = {
+            (receipt.user_id, receipt.event_id)
+            for receipt in receipts
+            if receipt.thread_id is None
+        }
+
+        # event_id -> receipt_type -> user_id -> receipt data
+        content: Dict[str, Dict[str, Dict[str, JsonMapping]]] = {}
+        for receipt in receipts:
+            data = receipt.data
+            if receipt.thread_id is not None:
+                if (receipt.user_id, receipt.event_id) in unthreaded_receipts:
+                    # Ignore threaded receipts if we have an unthreaded one.
+                    continue
+
+                data = dict(data)
+                data["thread_id"] = receipt.thread_id
+
+            content.setdefault(receipt.event_id, {}).setdefault(
+                receipt.receipt_type, {}
+            )[receipt.user_id] = data
+
+        return content
+
+
 class ReceiptsWorkerStore(SQLBaseStore):
     def __init__(
         self,
@@ -398,7 +454,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         def f(
             txn: LoggingTransaction,
-        ) -> List[Tuple[str, str, str, str, Optional[str], str]]:
+        ) -> Mapping[str, Sequence[ReceiptInRoom]]:
             if from_key:
                 sql = """
                     SELECT stream_id, instance_name, room_id, receipt_type,
@@ -428,50 +484,46 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
                 txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args))
 
-            return [
-                (room_id, receipt_type, user_id, event_id, thread_id, data)
-                for stream_id, instance_name, room_id, receipt_type, user_id, event_id, thread_id, data in txn
-                if MultiWriterStreamToken.is_stream_position_in_range(
+            results: Dict[str, List[ReceiptInRoom]] = {}
+            for (
+                stream_id,
+                instance_name,
+                room_id,
+                receipt_type,
+                user_id,
+                event_id,
+                thread_id,
+                data,
+            ) in txn:
+                if not MultiWriterStreamToken.is_stream_position_in_range(
                     from_key, to_key, instance_name, stream_id
+                ):
+                    continue
+
+                results.setdefault(room_id, []).append(
+                    ReceiptInRoom(
+                        receipt_type=receipt_type,
+                        user_id=user_id,
+                        event_id=event_id,
+                        thread_id=thread_id,
+                        data=db_to_json(data),
+                    )
                 )
-            ]
+
+            return results
 
         txn_results = await self.db_pool.runInteraction(
             "_get_linearized_receipts_for_rooms", f
         )
 
-        results: JsonDict = {}
-        for room_id, receipt_type, user_id, event_id, thread_id, data in txn_results:
-            # We want a single event per room, since we want to batch the
-            # receipts by room, event and type.
-            room_event = results.setdefault(
-                room_id,
-                {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}},
-            )
-
-            # The content is of the form:
-            # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
-            event_entry = room_event["content"].setdefault(event_id, {})
-            receipt_type_dict = event_entry.setdefault(receipt_type, {})
-
-            # MSC4102: always replace threaded receipts with unthreaded ones if there is a clash.
-            # Specifically:
-            # - if there is no existing receipt, great, set the data.
-            # - if there is an existing receipt, is it threaded (thread_id present)?
-            #    YES: replace if this receipt has no thread id. NO: do not replace.
-            # This means we will drop some receipts, but MSC4102 is designed to drop semantically
-            # meaningless receipts, so this is okay. Previously, we would drop meaningful data!
-            receipt_data = db_to_json(data)
-            if user_id in receipt_type_dict:  # existing receipt
-                # is the existing receipt threaded and we are currently processing an unthreaded one?
-                if "thread_id" in receipt_type_dict[user_id] and not thread_id:
-                    receipt_type_dict[user_id] = (
-                        receipt_data  # replace with unthreaded one
-                    )
-            else:  # receipt does not exist, just set it
-                receipt_type_dict[user_id] = receipt_data
-                if thread_id:
-                    receipt_type_dict[user_id]["thread_id"] = thread_id
+        results: JsonDict = {
+            room_id: {
+                "room_id": room_id,
+                "type": EduTypes.RECEIPT,
+                "content": ReceiptInRoom.merge_to_content(receipts),
+            }
+            for room_id, receipts in txn_results.items()
+        }
 
         results = {
             room_id: [results[room_id]] if room_id in results else []
@@ -479,6 +531,69 @@ class ReceiptsWorkerStore(SQLBaseStore):
         }
         return results
 
+    async def get_linearized_receipts_for_events(
+        self,
+        room_and_event_ids: Collection[Tuple[str, str]],
+    ) -> Mapping[str, Sequence[ReceiptInRoom]]:
+        """Get all receipts for the given set of events.
+
+        Arguments:
+            room_and_event_ids: A collection of (room ID, event ID) tuples to
+                fetch receipts for
+
+        Returns:
+            A mapping from room ID to the receipts for the requested events in
+            that room.
+        """
+        if not room_and_event_ids:
+            return {}
+
+        def get_linearized_receipts_for_events_txn(
+            txn: LoggingTransaction,
+            room_id_event_id_tuples: Collection[Tuple[str, str]],
+        ) -> List[Tuple[str, str, str, str, Optional[str], str]]:
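+            # Build a `(room_id, event_id) IN (...)` clause so we only fetch
+            # receipts for the requested events.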
+            clause, args = make_tuple_in_list_sql_clause(
+                self.database_engine, ("room_id", "event_id"), room_id_event_id_tuples
+            )
+
+            sql = f"""
+                SELECT room_id, receipt_type, user_id, event_id, thread_id, data
+                FROM receipts_linearized
+                WHERE {clause}
+            """
+
+            txn.execute(sql, args)
+
+            return txn.fetchall()
+
+        # room_id -> receipts
+        room_to_receipts: Dict[str, List[ReceiptInRoom]] = {}
+        for batch in batch_iter(room_and_event_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "get_linearized_receipts_for_events",
+                get_linearized_receipts_for_events_txn,
+                batch,
+            )
+
+            for (
+                room_id,
+                receipt_type,
+                user_id,
+                event_id,
+                thread_id,
+                data,
+            ) in batch_results:
+                room_to_receipts.setdefault(room_id, []).append(
+                    ReceiptInRoom(
+                        receipt_type=receipt_type,
+                        user_id=user_id,
+                        event_id=event_id,
+                        thread_id=thread_id,
+                        data=db_to_json(data),
+                    )
+                )
+
+        return room_to_receipts
+
     @cached(
         num_args=2,
     )
@@ -550,6 +665,114 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         return results
 
+    async def get_linearized_receipts_for_user_in_rooms(
+        self, user_id: str, room_ids: StrCollection, to_key: MultiWriterStreamToken
+    ) -> Mapping[str, Sequence[ReceiptInRoom]]:
+        """Fetch all receipts for the user in the given room.
+
+        Returns:
+            A dict from room ID to receipts in the room.
+        """
+
+        def get_linearized_receipts_for_user_in_rooms_txn(
+            txn: LoggingTransaction,
+            batch_room_ids: StrCollection,
+        ) -> List[Tuple[str, str, str, str, Optional[str], str]]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", batch_room_ids
+            )
+
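+            # The SQL bound below uses the maximum stream position across
+            # writers; we then filter precisely per-instance in Python.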
+            sql = f"""
+                SELECT instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data
+                FROM receipts_linearized
+                WHERE {clause} AND user_id = ? AND stream_id <= ?
+            """
+
+            args.append(user_id)
+            args.append(to_key.get_max_stream_pos())
+
+            txn.execute(sql, args)
+
+            return [
+                (room_id, receipt_type, user_id, event_id, thread_id, data)
+                for instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data in txn
+                if MultiWriterStreamToken.is_stream_position_in_range(
+                    low=None,
+                    high=to_key,
+                    instance_name=instance_name,
+                    pos=stream_id,
+                )
+            ]
+
+        # room_id -> receipts
+        room_to_receipts: Dict[str, List[ReceiptInRoom]] = {}
+        for batch in batch_iter(room_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "get_linearized_receipts_for_events",
+                get_linearized_receipts_for_user_in_rooms_txn,
+                batch,
+            )
+
+            for (
+                room_id,
+                receipt_type,
+                user_id,
+                event_id,
+                thread_id,
+                data,
+            ) in batch_results:
+                room_to_receipts.setdefault(room_id, []).append(
+                    ReceiptInRoom(
+                        receipt_type=receipt_type,
+                        user_id=user_id,
+                        event_id=event_id,
+                        thread_id=thread_id,
+                        data=db_to_json(data),
+                    )
+                )
+
+        return room_to_receipts
+
+    async def get_rooms_with_receipts_between(
+        self,
+        room_ids: StrCollection,
+        from_key: MultiWriterStreamToken,
+        to_key: MultiWriterStreamToken,
+    ) -> StrCollection:
+        """Given a set of room_ids, find out which ones (may) have receipts
+        between the two tokens (> `from_key` and <= `to_key`)."""
+
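+        # Use the in-memory stream change cache to cheaply rule out rooms with
+        # no receipt changes since `from_key`. This may over-approximate, hence
+        # the "(may)" in the docstring.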
+        room_ids = self._receipts_stream_cache.get_entities_changed(
+            room_ids, from_key.stream
+        )
+        if not room_ids:
+            return []
+
+        def f(txn: LoggingTransaction, room_ids: StrCollection) -> StrCollection:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", room_ids
+            )
+
+            sql = f"""
+                SELECT DISTINCT room_id FROM receipts_linearized
+                WHERE {clause} AND ? < stream_id AND stream_id <= ?
+            """
+            args.append(from_key.stream)
+            args.append(to_key.get_max_stream_pos())
+
+            txn.execute(sql, args)
+
+            return [room_id for (room_id,) in txn]
+
+        results: List[str] = []
+        for batch in batch_iter(room_ids, 1000):
+            batch_result = await self.db_pool.runInteraction(
+                "get_rooms_with_receipts_between", f, batch
+            )
+            results.extend(batch_result)
+
+        return results
+
     async def get_users_sent_receipts_between(
         self, last_id: int, current_id: int
     ) -> List[str]:
@@ -807,9 +1030,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             SELECT event_id WHERE room_id = ? AND stream_ordering IN (
                 SELECT max(stream_ordering) WHERE %s
             )
-        """ % (
-            clause,
-        )
+        """ % (clause,)
 
         txn.execute(sql, [room_id] + list(args))
         rows = txn.fetchall()
@@ -954,6 +1175,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
             self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
             self._background_receipts_graph_unique_index,
         )
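+        # Index over (room_id, event_id) so that lookups of receipts for
+        # specific events (e.g. `get_linearized_receipts_for_events`) are fast.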
+        self.db_pool.updates.register_background_index_update(
+            update_name="receipts_room_id_event_id_index",
+            index_name="receipts_linearized_event_id",
+            table="receipts_linearized",
+            columns=("room_id", "event_id"),
+        )
 
     async def _populate_receipt_event_stream_ordering(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index df7f8a43b7..d7cbe33411 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -1250,9 +1250,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                 SELECT address, session_id, medium, client_secret,
                 last_send_attempt, validated_at
                 FROM threepid_validation_session WHERE %s
-                """ % (
-                " AND ".join("%s = ?" % k for k in keyvalues.keys()),
-            )
+                """ % (" AND ".join("%s = ?" % k for k in keyvalues.keys()),)
 
             if validated is not None:
                 sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 80a4bf95f2..e0b7b7e194 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1382,6 +1382,30 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         partial_state_rooms = {row[0] for row in rows}
         return {room_id: room_id in partial_state_rooms for room_id in room_ids}
 
+    @cached(max_entries=10000, iterable=True)
+    async def get_partial_rooms(self) -> AbstractSet[str]:
+        """Get any "partial-state" rooms which the user is in.
+
+        This is fast as the set of partially stated rooms at any point across
+        the whole server is small, and so such a query is fast. This is also
+        faster than looking up whether a set of room ID's are partially stated
+        via `is_partial_state_room_batched(...)` because of the sheer amount of
+        CPU time looking all the rooms up in the cache.
+        """
+
+        def _get_partial_rooms_for_user_txn(
+            txn: LoggingTransaction,
+        ) -> AbstractSet[str]:
+            sql = """
+                SELECT room_id FROM partial_state_rooms
+            """
+            txn.execute(sql)
+            return {room_id for (room_id,) in txn}
+
+        return await self.db_pool.runInteraction(
+            "get_partial_rooms_for_user", _get_partial_rooms_for_user_txn
+        )
+
     async def get_join_event_id_and_device_lists_stream_id_for_partial_state(
         self, room_id: str
     ) -> Tuple[str, int]:
@@ -1608,9 +1632,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
                 FROM event_reports AS er
                 JOIN room_stats_state ON room_stats_state.room_id = er.room_id
                 {}
-                """.format(
-                where_clause
-            )
+                """.format(where_clause)
             txn.execute(sql, args)
             count = cast(Tuple[int], txn.fetchone())[0]
 
@@ -2343,6 +2365,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
         self._invalidate_cache_and_stream(
             txn, self._get_partial_state_servers_at_join, (room_id,)
         )
+        self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms)
 
     async def write_partial_state_rooms_join_event_id(
         self,
@@ -2564,6 +2587,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
         self._invalidate_cache_and_stream(
             txn, self._get_partial_state_servers_at_join, (room_id,)
         )
+        self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms)
 
         DatabasePool.simple_insert_txn(
             txn,
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index d8b54dc4e3..c77e009d03 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -19,6 +19,7 @@
 #
 #
 import logging
+from http import HTTPStatus
 from typing import (
     TYPE_CHECKING,
     AbstractSet,
@@ -39,6 +40,9 @@ from typing import (
 import attr
 
 from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import Codes, SynapseError
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.logging.opentracing import trace
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -51,10 +55,10 @@ from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import Sqlite3Engine
 from synapse.storage.roommember import (
-    GetRoomsForUserWithStreamOrdering,
     MemberSummary,
     ProfileInfo,
     RoomsForUser,
+    RoomsForUserSlidingSync,
 )
 from synapse.types import (
     JsonDict,
@@ -229,9 +233,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 AND m.room_id = c.room_id
                 AND m.user_id = c.state_key
                 WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? AND %s
-            """ % (
-                clause,
-            )
+            """ % (clause,)
             txn.execute(sql, (room_id, Membership.JOIN, *ids))
 
             return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn}
@@ -284,8 +286,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
 
     @cached(max_entries=100000)  # type: ignore[synapse-@cached-mutable]
     async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]:
-        """Get the details of a room roughly suitable for use by the room
+        """
+        Get the details of a room roughly suitable for use by the room
         summary extension to /sync. Useful when lazy loading room members.
+
+        Returns the total count of members in the room by membership type, and a
+        truncated list of members (the heroes). This will be the first 6 members of the
+        room:
+        - We want 5 heroes plus 1, in case one of them is the calling user.
+        - They are the joined or invited members, ordered by `stream_ordering`. When no
+          joined or invited members are available, this also includes banned and left
+          users.
+
         Args:
             room_id: The room ID to query
         Returns:
@@ -299,37 +312,42 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             # We do this all in one transaction to keep the cache small.
             # FIXME: get rid of this when we have room_stats
 
-            # Note, rejected events will have a null membership field, so
-            # we we manually filter them out.
-            sql = """
-                SELECT count(*), membership FROM current_state_events
-                WHERE type = 'm.room.member' AND room_id = ?
-                    AND membership IS NOT NULL
-                GROUP BY membership
-            """
+            counts = self._get_member_counts_txn(txn, room_id)
 
-            txn.execute(sql, (room_id,))
             res: Dict[str, MemberSummary] = {}
-            for count, membership in txn:
+            for membership, count in counts.items():
                 res.setdefault(membership, MemberSummary([], count))
 
-            # we order by membership and then fairly arbitrarily by event_id so
-            # heroes are consistent
-            # Note, rejected events will have a null membership field, so
-            # we we manually filter them out.
+            # Order by membership (joins -> invites -> leave (former insiders) ->
+            # everything else (outsiders like bans/knocks), then by `stream_ordering` so
+            # the first members in the room show up first and to make the sort stable
+            # (consistent heroes).
+            #
+            # Note: rejected events will have a null membership field, so we
+            # manually filter them out.
             sql = """
                 SELECT state_key, membership, event_id
                 FROM current_state_events
                 WHERE type = 'm.room.member' AND room_id = ?
                     AND membership IS NOT NULL
                 ORDER BY
-                    CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
-                    event_id ASC
+                    CASE membership WHEN ? THEN 1 WHEN ? THEN 2 WHEN ? THEN 3 ELSE 4 END ASC,
+                    event_stream_ordering ASC
                 LIMIT ?
             """
 
-            # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
-            txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
+            txn.execute(
+                sql,
+                (
+                    room_id,
+                    # Sort order
+                    Membership.JOIN,
+                    Membership.INVITE,
+                    Membership.LEAVE,
+                    # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
+                    6,
+                ),
+            )
             for user_id, membership, event_id in txn:
                 summary = res[membership]
                 # we will always have a summary for this membership type at this
@@ -343,6 +361,31 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             "get_room_summary", _get_room_summary_txn
         )
 
+    @cached()
+    async def get_member_counts(self, room_id: str) -> Mapping[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        return await self.db_pool.runInteraction(
+            "get_member_counts", self._get_member_counts_txn, room_id
+        )
+
+    def _get_member_counts_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> Dict[str, int]:
+        """Get a mapping of number of users by membership"""
+
+        # Note: rejected events will have a null membership field, so
+        # we manually filter them out.
+        sql = """
+            SELECT count(*), membership FROM current_state_events
+            WHERE type = 'm.room.member' AND room_id = ?
+                AND membership IS NOT NULL
+            GROUP BY membership
+        """
+
+        txn.execute(sql, (room_id,))
+        return {membership: count for count, membership in txn}
+
     @cached()
     async def get_number_joined_users_in_room(self, room_id: str) -> int:
         return await self.db_pool.simple_select_one_onecol(
@@ -403,6 +446,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 return invite
         return None
 
+    @trace
     async def get_rooms_for_local_user_where_membership_is(
         self,
         user_id: str,
@@ -426,9 +470,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
         if not membership_list:
             return []
 
-        rooms = await self.db_pool.runInteraction(
-            "get_rooms_for_local_user_where_membership_is",
-            self._get_rooms_for_local_user_where_membership_is_txn,
+        # Convert membership list to frozen set as a) it needs to be hashable,
+        # and b) we don't care about the order.
+        membership_list = frozenset(membership_list)
+
+        rooms = await self._get_rooms_for_local_user_where_membership_is_inner(
             user_id,
             membership_list,
         )
@@ -447,6 +493,24 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
 
         return [room for room in rooms if room.room_id not in rooms_to_exclude]
 
+    @cached(max_entries=1000, tree=True)
+    async def _get_rooms_for_local_user_where_membership_is_inner(
+        self,
+        user_id: str,
+        membership_list: Collection[str],
+    ) -> Sequence[RoomsForUser]:
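+        """Cached helper for `get_rooms_for_local_user_where_membership_is`.
+
+        `membership_list` must be hashable (the caller passes a frozenset) so
+        that it can form part of the cache key.
+        """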
+        if not membership_list:
+            return []
+
+        rooms = await self.db_pool.runInteraction(
+            "get_rooms_for_local_user_where_membership_is",
+            self._get_rooms_for_local_user_where_membership_is_txn,
+            user_id,
+            membership_list,
+        )
+
+        return rooms
+
     def _get_rooms_for_local_user_where_membership_is_txn(
         self,
         txn: LoggingTransaction,
@@ -483,9 +547,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             WHERE
                 user_id = ?
                 AND %s
-        """ % (
-            clause,
-        )
+        """ % (clause,)
 
         txn.execute(sql, (user_id, *args))
         results = [
@@ -494,7 +556,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 sender=sender,
                 membership=membership,
                 event_id=event_id,
-                event_pos=PersistedEventPosition(instance_name, stream_ordering),
+                event_pos=PersistedEventPosition(
+                    # If instance_name is null we default to "master"
+                    instance_name or "master",
+                    stream_ordering,
+                ),
                 room_version_id=room_version,
             )
             for room_id, sender, membership, event_id, instance_name, stream_ordering, room_version in txn
@@ -586,10 +652,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
         """
         # Paranoia check.
         if not self.hs.is_mine_id(user_id):
-            raise Exception(
-                "Cannot call 'get_local_current_membership_for_user_in_room' on "
-                "non-local user %s" % (user_id,),
-            )
+            message = f"Provided user_id {user_id} is a non-local user"
+            raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON)
 
         results = cast(
             Optional[Tuple[str, str]],
@@ -606,53 +670,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
 
         return results
 
-    @cached(max_entries=500000, iterable=True)
-    async def get_rooms_for_user_with_stream_ordering(
-        self, user_id: str
-    ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]:
-        """Returns a set of room_ids the user is currently joined to.
-
-        If a remote user only returns rooms this server is currently
-        participating in.
-
-        Args:
-            user_id
-
-        Returns:
-            Returns the rooms the user is in currently, along with the stream
-            ordering of the most recent join for that user and room, along with
-            the room version of the room.
-        """
-        return await self.db_pool.runInteraction(
-            "get_rooms_for_user_with_stream_ordering",
-            self._get_rooms_for_user_with_stream_ordering_txn,
-            user_id,
-        )
-
-    def _get_rooms_for_user_with_stream_ordering_txn(
-        self, txn: LoggingTransaction, user_id: str
-    ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]:
-        # We use `current_state_events` here and not `local_current_membership`
-        # as a) this gets called with remote users and b) this only gets called
-        # for rooms the server is participating in.
-        sql = """
-            SELECT room_id, e.instance_name, e.stream_ordering
-            FROM current_state_events AS c
-            INNER JOIN events AS e USING (room_id, event_id)
-            WHERE
-                c.type = 'm.room.member'
-                AND c.state_key = ?
-                AND c.membership = ?
-        """
-
-        txn.execute(sql, (user_id, Membership.JOIN))
-        return frozenset(
-            GetRoomsForUserWithStreamOrdering(
-                room_id, PersistedEventPosition(instance, stream_id)
-            )
-            for room_id, instance, stream_id in txn
-        )
-
     async def get_users_server_still_shares_room_with(
         self, user_ids: Collection[str]
     ) -> Set[str]:
@@ -694,6 +711,27 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
 
         return {row[0] for row in txn}
 
+    async def get_rooms_user_currently_banned_from(
+        self, user_id: str
+    ) -> FrozenSet[str]:
+        """Returns a set of room_ids the user is currently banned from.
+
+        For a remote user, this only returns rooms this server is currently
+        participating in.
+        """
+        room_ids = await self.db_pool.simple_select_onecol(
+            table="current_state_events",
+            keyvalues={
+                "type": EventTypes.Member,
+                "membership": Membership.BAN,
+                "state_key": user_id,
+            },
+            retcol="room_id",
+            desc="get_rooms_user_currently_banned_from",
+        )
+
+        return frozenset(room_ids)
+
     @cached(max_entries=500000, iterable=True)
     async def get_rooms_for_user(self, user_id: str) -> FrozenSet[str]:
         """Returns a set of room_ids the user is currently joined to.
@@ -701,13 +739,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
         If a remote user only returns rooms this server is currently
         participating in.
         """
-        rooms = self.get_rooms_for_user_with_stream_ordering.cache.get_immediate(
-            (user_id,),
-            None,
-            update_metrics=False,
-        )
-        if rooms:
-            return frozenset(r.room_id for r in rooms)
 
         room_ids = await self.db_pool.simple_select_onecol(
             table="current_state_events",
@@ -817,7 +848,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             """
 
             txn.execute(sql, (user_id, *args))
-            return {u: True for u, in txn}
+            return {u: True for (u,) in txn}
 
         to_return = {}
         for batch_user_ids in batch_iter(other_user_ids, 1000):
@@ -1035,7 +1066,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                     AND room_id = ?
             """
             txn.execute(sql, (room_id,))
-            return {d for d, in txn}
+            return {d for (d,) in txn}
 
         return await self.db_pool.runInteraction(
             "get_current_hosts_in_room", get_current_hosts_in_room_txn
@@ -1103,7 +1134,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             """
             txn.execute(sql, (room_id,))
             # `server_domain` will be `NULL` for malformed MXIDs with no colons.
-            return tuple(d for d, in txn if d is not None)
+            return tuple(d for (d,) in txn if d is not None)
 
         return await self.db_pool.runInteraction(
             "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn
@@ -1320,9 +1351,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 room_id = ? AND membership = ?
                 AND NOT (%s)
                 LIMIT 1
-        """ % (
-            clause,
-        )
+        """ % (clause,)
 
         def _is_local_host_in_room_ignoring_users_txn(
             txn: LoggingTransaction,
@@ -1346,11 +1375,20 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 keyvalues={"user_id": user_id, "room_id": room_id},
                 updatevalues={"forgotten": 1},
             )
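+            # Also mark the membership as forgotten in the sliding sync
+            # snapshot table so the room stops appearing in sliding sync
+            # responses.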
+            self.db_pool.simple_update_txn(
+                txn,
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"user_id": user_id, "room_id": room_id},
+                updatevalues={"forgotten": 1},
+            )
 
             self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
             self._invalidate_cache_and_stream(
                 txn, self.get_forgotten_rooms_for_user, (user_id,)
             )
+            self._invalidate_cache_and_stream(
+                txn, self.get_sliding_sync_rooms_for_user, (user_id,)
+            )
 
         await self.db_pool.runInteraction("forget_membership", f)
 
@@ -1380,6 +1418,159 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
             desc="room_forgetter_stream_pos",
         )
 
+    @cached(iterable=True, max_entries=10000)
+    async def get_sliding_sync_rooms_for_user(
+        self,
+        user_id: str,
+    ) -> Mapping[str, RoomsForUserSlidingSync]:
+        """Get all the rooms for a user to handle a sliding sync request.
+
+        Ignores forgotten rooms and rooms that the user has left themselves.
+
+        Returns:
+            Map from room ID to membership info
+        """
+
+        def get_sliding_sync_rooms_for_user_txn(
+            txn: LoggingTransaction,
+        ) -> Dict[str, RoomsForUserSlidingSync]:
+            # XXX: If you use any new columns that can change (like from
+            # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the
+            # `get_sliding_sync_rooms_for_user` cache in the appropriate places (and add
+            # tests).
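+            #
+            # The COALESCEs prefer the live values from `sliding_sync_joined_rooms`
+            # when the user is currently joined, falling back to the values
+            # snapshotted in `sliding_sync_membership_snapshots` at the time of
+            # the membership event.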
+            sql = """
+                SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
+                    r.room_version,
+                    m.event_instance_name, m.event_stream_ordering,
+                    m.has_known_state,
+                    COALESCE(j.room_type, m.room_type),
+                    COALESCE(j.is_encrypted, m.is_encrypted)
+                FROM sliding_sync_membership_snapshots AS m
+                INNER JOIN rooms AS r USING (room_id)
+                LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join')
+                WHERE user_id = ?
+                    AND m.forgotten = 0
+                    AND (m.membership != 'leave' OR m.user_id != m.sender)
+            """
+            txn.execute(sql, (user_id,))
+            return {
+                row[0]: RoomsForUserSlidingSync(
+                    room_id=row[0],
+                    sender=row[1],
+                    membership=row[2],
+                    event_id=row[3],
+                    room_version_id=row[4],
+                    event_pos=PersistedEventPosition(row[5], row[6]),
+                    has_known_state=bool(row[7]),
+                    room_type=row[8],
+                    is_encrypted=bool(row[9]),
+                )
+                for row in txn
+                # We filter out unknown room versions proactively. They
+                # shouldn't go down sync and their metadata may be in a broken
+                # state (causing errors).
+                if row[4] in KNOWN_ROOM_VERSIONS
+            }
+
+        return await self.db_pool.runInteraction(
+            "get_sliding_sync_rooms_for_user",
+            get_sliding_sync_rooms_for_user_txn,
+        )
+
+    async def get_sliding_sync_room_for_user(
+        self, user_id: str, room_id: str
+    ) -> Optional[RoomsForUserSlidingSync]:
+        """Get the sliding sync room entry for the given user and room."""
+
+        def get_sliding_sync_room_for_user_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[RoomsForUserSlidingSync]:
+            sql = """
+                SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
+                    r.room_version,
+                    m.event_instance_name, m.event_stream_ordering,
+                    m.has_known_state,
+                    COALESCE(j.room_type, m.room_type),
+                    COALESCE(j.is_encrypted, m.is_encrypted)
+                FROM sliding_sync_membership_snapshots AS m
+                INNER JOIN rooms AS r USING (room_id)
+                LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join')
+                WHERE user_id = ?
+                    AND m.forgotten = 0
+                    AND m.room_id = ?
+            """
+            txn.execute(sql, (user_id, room_id))
+            row = txn.fetchone()
+            if not row:
+                return None
+
+            return RoomsForUserSlidingSync(
+                room_id=row[0],
+                sender=row[1],
+                membership=row[2],
+                event_id=row[3],
+                room_version_id=row[4],
+                event_pos=PersistedEventPosition(row[5], row[6]),
+                has_known_state=bool(row[7]),
+                room_type=row[8],
+                is_encrypted=row[9],
+            )
+
+        return await self.db_pool.runInteraction(
+            "get_sliding_sync_room_for_user", get_sliding_sync_room_for_user_txn
+        )
+
+    async def get_sliding_sync_room_for_user_batch(
+        self, user_id: str, room_ids: StrCollection
+    ) -> Dict[str, RoomsForUserSlidingSync]:
+        """Get the sliding sync room entry for the given user and rooms."""
+
+        if not room_ids:
+            return {}
+
+        def get_sliding_sync_room_for_user_batch_txn(
+            txn: LoggingTransaction,
+        ) -> Dict[str, RoomsForUserSlidingSync]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "m.room_id", room_ids
+            )
+            sql = f"""
+                SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
+                    r.room_version,
+                    m.event_instance_name, m.event_stream_ordering,
+                    m.has_known_state,
+                    COALESCE(j.room_type, m.room_type),
+                    COALESCE(j.is_encrypted, m.is_encrypted)
+                FROM sliding_sync_membership_snapshots AS m
+                INNER JOIN rooms AS r USING (room_id)
+                LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join')
+                WHERE m.forgotten = 0
+                    AND {clause}
+                    AND user_id = ?
+            """
+            args.append(user_id)
+            txn.execute(sql, args)
+
+            return {
+                row[0]: RoomsForUserSlidingSync(
+                    room_id=row[0],
+                    sender=row[1],
+                    membership=row[2],
+                    event_id=row[3],
+                    room_version_id=row[4],
+                    event_pos=PersistedEventPosition(row[5], row[6]),
+                    has_known_state=bool(row[7]),
+                    room_type=row[8],
+                    is_encrypted=row[9],
+                )
+                for row in txn
+            }
+
+        return await self.db_pool.runInteraction(
+            "get_sliding_sync_room_for_user_batch",
+            get_sliding_sync_room_for_user_batch_txn,
+        )
+
 
 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(
@@ -1414,10 +1605,12 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
         self, progress: JsonDict, batch_size: int
     ) -> int:
         target_min_stream_id = progress.get(
-            "target_min_stream_id_inclusive", self._min_stream_order_on_start  # type: ignore[attr-defined]
+            "target_min_stream_id_inclusive",
+            self._min_stream_order_on_start,  # type: ignore[attr-defined]
         )
         max_stream_id = progress.get(
-            "max_stream_id_exclusive", self._stream_order_on_start + 1  # type: ignore[attr-defined]
+            "max_stream_id_exclusive",
+            self._stream_order_on_start + 1,  # type: ignore[attr-defined]
         )
 
         def add_membership_profile_txn(txn: LoggingTransaction) -> int:
@@ -1564,10 +1757,19 @@ def extract_heroes_from_room_summary(
 ) -> List[str]:
     """Determine the users that represent a room, from the perspective of the `me` user.
 
+    This function expects `MemberSummary.members` to already be sorted by
+    `stream_ordering` like the results from `get_room_summary(...)`.
+
     The rules which say which users we select are specified in the "Room Summary"
     section of
     https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
 
+
+    Args:
+        details: Mapping from membership type to member summary. We expect
+            `MemberSummary.members` to already be sorted by `stream_ordering`.
+        me: The user for whom we are determining the heroes.
+
     Returns a list (possibly empty) of heroes' mxids.
     """
     empty_ms = MemberSummary([], 0)
@@ -1582,11 +1784,11 @@ def extract_heroes_from_room_summary(
         r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
     ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
 
-    # FIXME: order by stream ordering rather than as returned by SQL
+    # We expect `MemberSummary.members` to already be sorted by `stream_ordering`
     if joined_user_ids or invited_user_ids:
-        return sorted(joined_user_ids + invited_user_ids)[0:5]
+        return (joined_user_ids + invited_user_ids)[0:5]
     else:
-        return sorted(gone_user_ids)[0:5]
+        return gone_user_ids[0:5]
 
 
 @attr.s(slots=True, auto_attribs=True)
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 20fcfd3122..b436275f3f 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -177,9 +177,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
             AND (%s)
             ORDER BY stream_ordering DESC
             LIMIT ?
-            """ % (
-                " OR ".join("type = '%s'" % (t,) for t in TYPES),
-            )
+            """ % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
 
             txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
 
diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py
new file mode 100644
index 0000000000..7b357c1ffe
--- /dev/null
+++ b/synapse/storage/databases/main/sliding_sync.py
@@ -0,0 +1,568 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+
+import logging
+from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast
+
+import attr
+
+from synapse.api.errors import SlidingSyncUnknownPosition
+from synapse.logging.opentracing import log_kv
+from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage.database import LoggingTransaction
+from synapse.types import MultiWriterStreamToken, RoomStreamToken
+from synapse.types.handlers.sliding_sync import (
+    HaveSentRoom,
+    HaveSentRoomFlag,
+    MutablePerConnectionState,
+    PerConnectionState,
+    RoomStatusMap,
+    RoomSyncConfig,
+)
+from synapse.util import json_encoder
+from synapse.util.caches.descriptors import cached
+
+if TYPE_CHECKING:
+    from synapse.storage.databases.main import DataStore
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncStore(SQLBaseStore):
+    async def get_latest_bump_stamp_for_room(
+        self,
+        room_id: str,
+    ) -> Optional[int]:
+        """
+        Get the `bump_stamp` for the room.
+
+        The `bump_stamp` is the `stream_ordering` of the last event according to the
+        `bump_event_types`. This helps clients sort more readily without them needing to
+        pull in a bunch of the timeline to determine the last activity.
+        `bump_event_types` exists because, for example, we don't want display name
+        changes to mark the room as unread and bump it to the top. For encrypted rooms,
+        we have to consider any activity as a bump because we can't see the content
+        and the client has to figure it out for themselves.
+
+        This should only be called where the server is participating
+        in the room (someone local is joined).
+
+        Returns:
+            The `bump_stamp` for the room (which can be `None`).
+        """
+
+        return cast(
+            Optional[int],
+            await self.db_pool.simple_select_one_onecol(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                retcol="bump_stamp",
+                # FIXME: This should be `False` once we bump `SCHEMA_COMPAT_VERSION` and run the
+                # foreground update for
+                # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked
+                # by https://github.com/element-hq/synapse/issues/17623)
+                #
+                # This should be `allow_none=False` in the future because even though
+                # `bump_stamp` itself can be `None`, we should have a row in the
+                # `sliding_sync_joined_rooms` table for any joined room.
+                allow_none=True,
+            ),
+        )
+
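+    # Illustrative fallback (hypothetical, not part of this change): a caller that
+    # needs a sort key for every joined room might do something like
+    #     bump_stamp = await store.get_latest_bump_stamp_for_room(room_id)
+    #     if bump_stamp is None:
+    #         bump_stamp = latest_event_pos.stream  # hypothetical fallback value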
+    async def persist_per_connection_state(
+        self,
+        user_id: str,
+        device_id: str,
+        conn_id: str,
+        previous_connection_position: Optional[int],
+        per_connection_state: "MutablePerConnectionState",
+    ) -> int:
+        """Persist updates to the per-connection state for a sliding sync
+        connection.
+
+        Returns:
+            The connection position of the newly persisted state.
+        """
+
+        # This cast is safe because the downstream code only cares about
+        # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed
+        # alongside `SlidingSyncStore` wherever we create a store.
+        store = cast("DataStore", self)
+
+        return await self.db_pool.runInteraction(
+            "persist_per_connection_state",
+            self.persist_per_connection_state_txn,
+            user_id=user_id,
+            device_id=device_id,
+            conn_id=conn_id,
+            previous_connection_position=previous_connection_position,
+            per_connection_state=await PerConnectionStateDB.from_state(
+                per_connection_state, store
+            ),
+        )
+
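+    # Rough usage sketch (hypothetical caller, not part of this change):
+    #     new_pos = await store.persist_per_connection_state(
+    #         user_id, device_id, conn_id, previous_connection_position=prev_pos,
+    #         per_connection_state=mutable_state,
+    #     )
+    # where `prev_pos` is `None` for a fresh connection and `mutable_state` is the
+    # `MutablePerConnectionState` accumulated while serving the request.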
+    def persist_per_connection_state_txn(
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_id: str,
+        conn_id: str,
+        previous_connection_position: Optional[int],
+        per_connection_state: "PerConnectionStateDB",
+    ) -> int:
+        # First we fetch (or create) the connection key associated with the
+        # previous connection position.
+        if previous_connection_position is not None:
+            # The `previous_connection_position` is a user-supplied value, so we
+            # need to make sure that the one they supplied is actually theirs.
+            sql = """
+                SELECT connection_key
+                FROM sliding_sync_connection_positions
+                INNER JOIN sliding_sync_connections USING (connection_key)
+                WHERE
+                    connection_position = ?
+                    AND user_id = ? AND effective_device_id = ? AND conn_id = ?
+            """
+            txn.execute(
+                sql, (previous_connection_position, user_id, device_id, conn_id)
+            )
+            row = txn.fetchone()
+            if row is None:
+                raise SlidingSyncUnknownPosition()
+
+            (connection_key,) = row
+        else:
+            # We're restarting the connection, so we clear the previous data we used to
+            # track it. We do this here to ensure that if we get lots of one-shot
+            # requests we don't stack up lots of entries. We have `ON DELETE CASCADE`
+            # set up on the dependent tables so this will clear out all the associated
+            # data.
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="sliding_sync_connections",
+                keyvalues={
+                    "user_id": user_id,
+                    "effective_device_id": device_id,
+                    "conn_id": conn_id,
+                },
+            )
+
+            (connection_key,) = self.db_pool.simple_insert_returning_txn(
+                txn,
+                table="sliding_sync_connections",
+                values={
+                    "user_id": user_id,
+                    "effective_device_id": device_id,
+                    "conn_id": conn_id,
+                    "created_ts": self._clock.time_msec(),
+                },
+                returning=("connection_key",),
+            )
+
+        # Define a new connection position for the updates
+        (connection_position,) = self.db_pool.simple_insert_returning_txn(
+            txn,
+            table="sliding_sync_connection_positions",
+            values={
+                "connection_key": connection_key,
+                "created_ts": self._clock.time_msec(),
+            },
+            returning=("connection_position",),
+        )
+
+        # We need to deduplicate the `required_state` JSON. We do this by
+        # fetching all JSON associated with the connection and comparing that
+        # with the updates to `required_state`
+
+        # Dict from required state json -> required state ID
+        required_state_to_id: Dict[str, int] = {}
+        if previous_connection_position is not None:
+            rows = self.db_pool.simple_select_list_txn(
+                txn,
+                table="sliding_sync_connection_required_state",
+                keyvalues={"connection_key": connection_key},
+                retcols=("required_state_id", "required_state"),
+            )
+            for required_state_id, required_state in rows:
+                required_state_to_id[required_state] = required_state_id
+
+        room_to_state_ids: Dict[str, int] = {}
+        unique_required_state: Dict[str, List[str]] = {}
+        for room_id, room_state in per_connection_state.room_configs.items():
+            serialized_state = json_encoder.encode(
+                # We store the required state as a sorted list of event type /
+                # state key tuples.
+                sorted(
+                    (event_type, state_key)
+                    for event_type, state_keys in room_state.required_state_map.items()
+                    for state_key in state_keys
+                )
+            )
+
+            existing_state_id = required_state_to_id.get(serialized_state)
+            if existing_state_id is not None:
+                room_to_state_ids[room_id] = existing_state_id
+            else:
+                unique_required_state.setdefault(serialized_state, []).append(room_id)
+
+        # Insert any new `required_state` json we haven't previously seen.
+        for serialized_required_state, room_ids in unique_required_state.items():
+            (required_state_id,) = self.db_pool.simple_insert_returning_txn(
+                txn,
+                table="sliding_sync_connection_required_state",
+                values={
+                    "connection_key": connection_key,
+                    "required_state": serialized_required_state,
+                },
+                returning=("required_state_id",),
+            )
+            for room_id in room_ids:
+                room_to_state_ids[room_id] = required_state_id
+
+        # Copy over state from the previous connection position (we'll overwrite
+        # these rows with any changes).
+        if previous_connection_position is not None:
+            sql = """
+                INSERT INTO sliding_sync_connection_streams
+                (connection_position, stream, room_id, room_status, last_token)
+                SELECT ?, stream, room_id, room_status, last_token
+                FROM sliding_sync_connection_streams
+                WHERE connection_position = ?
+            """
+            txn.execute(sql, (connection_position, previous_connection_position))
+
+            sql = """
+                INSERT INTO sliding_sync_connection_room_configs
+                (connection_position, room_id, timeline_limit, required_state_id)
+                SELECT ?, room_id, timeline_limit, required_state_id
+                FROM sliding_sync_connection_room_configs
+                WHERE connection_position = ?
+            """
+            txn.execute(sql, (connection_position, previous_connection_position))
+
+        # We now upsert the changes to the various streams.
+        key_values = []
+        value_values = []
+        for room_id, have_sent_room in per_connection_state.rooms._statuses.items():
+            key_values.append((connection_position, "rooms", room_id))
+            value_values.append(
+                (have_sent_room.status.value, have_sent_room.last_token)
+            )
+
+        for room_id, have_sent_room in per_connection_state.receipts._statuses.items():
+            key_values.append((connection_position, "receipts", room_id))
+            value_values.append(
+                (have_sent_room.status.value, have_sent_room.last_token)
+            )
+
+        for (
+            room_id,
+            have_sent_room,
+        ) in per_connection_state.account_data._statuses.items():
+            key_values.append((connection_position, "account_data", room_id))
+            value_values.append(
+                (have_sent_room.status.value, have_sent_room.last_token)
+            )
+
+        self.db_pool.simple_upsert_many_txn(
+            txn,
+            table="sliding_sync_connection_streams",
+            key_names=(
+                "connection_position",
+                "stream",
+                "room_id",
+            ),
+            key_values=key_values,
+            value_names=(
+                "room_status",
+                "last_token",
+            ),
+            value_values=value_values,
+        )
+
+        # ... and upsert changes to the room configs.
+        keys = []
+        values = []
+        for room_id, room_config in per_connection_state.room_configs.items():
+            keys.append((connection_position, room_id))
+            values.append((room_config.timeline_limit, room_to_state_ids[room_id]))
+
+        self.db_pool.simple_upsert_many_txn(
+            txn,
+            table="sliding_sync_connection_room_configs",
+            key_names=(
+                "connection_position",
+                "room_id",
+            ),
+            key_values=keys,
+            value_names=(
+                "timeline_limit",
+                "required_state_id",
+            ),
+            value_values=values,
+        )
+
+        return connection_position
+
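+    # Illustrative only: a client that echoes position P back on its next request
+    # causes `get_and_clear_connection_positions(..., connection_position=P)` to load
+    # the state stored at P and drop every other position for that connection.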
+    @cached(iterable=True, max_entries=100000)
+    async def get_and_clear_connection_positions(
+        self, user_id: str, device_id: str, conn_id: str, connection_position: int
+    ) -> "PerConnectionState":
+        """Get the per-connection state for the given connection position."""
+
+        per_connection_state_db = await self.db_pool.runInteraction(
+            "get_and_clear_connection_positions",
+            self._get_and_clear_connection_positions_txn,
+            user_id=user_id,
+            device_id=device_id,
+            conn_id=conn_id,
+            connection_position=connection_position,
+        )
+
+        # This cast is safe because the downstream code only cares about
+        # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed
+        # alongside `SlidingSyncStore` wherever we create a store.
+        store = cast("DataStore", self)
+
+        return await per_connection_state_db.to_state(store)
+
+    def _get_and_clear_connection_positions_txn(
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_id: str,
+        conn_id: str,
+        connection_position: int,
+    ) -> "PerConnectionStateDB":
+        # The `previous_connection_position` is a user-supplied value, so we
+        # need to make sure that the one they supplied is actually theirs.
+        sql = """
+            SELECT connection_key
+            FROM sliding_sync_connection_positions
+            INNER JOIN sliding_sync_connections USING (connection_key)
+            WHERE
+                connection_position = ?
+                AND user_id = ? AND effective_device_id = ? AND conn_id = ?
+        """
+        txn.execute(sql, (connection_position, user_id, device_id, conn_id))
+        row = txn.fetchone()
+        if row is None:
+            raise SlidingSyncUnknownPosition()
+
+        (connection_key,) = row
+
+        # Now that we have seen the client has received and used the connection
+        # position, we can delete all the other connection positions.
+        sql = """
+            DELETE FROM sliding_sync_connection_positions
+            WHERE connection_key = ? AND connection_position != ?
+        """
+        txn.execute(sql, (connection_key, connection_position))
+
+        # Fetch and create a mapping from required state ID to the actual
+        # required state for the connection.
+        rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_required_state",
+            keyvalues={"connection_key": connection_key},
+            retcols=(
+                "required_state_id",
+                "required_state",
+            ),
+        )
+
+        required_state_map: Dict[int, Dict[str, Set[str]]] = {}
+        for row in rows:
+            state = required_state_map[row[0]] = {}
+            for event_type, state_key in db_to_json(row[1]):
+                state.setdefault(event_type, set()).add(state_key)
+
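+        # For example (illustrative values only), a stored `required_state` of
+        #     [["m.room.member", "@alice:example.org"], ["m.room.name", ""]]
+        # becomes {"m.room.member": {"@alice:example.org"}, "m.room.name": {""}}.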
+        # Get all the room configs, looking up the required state from the map
+        # above.
+        room_config_rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_room_configs",
+            keyvalues={"connection_position": connection_position},
+            retcols=(
+                "room_id",
+                "timeline_limit",
+                "required_state_id",
+            ),
+        )
+
+        room_configs: Dict[str, RoomSyncConfig] = {}
+        for (
+            room_id,
+            timeline_limit,
+            required_state_id,
+        ) in room_config_rows:
+            room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=timeline_limit,
+                required_state_map=required_state_map[required_state_id],
+            )
+
+        # Now look up the per-room stream data.
+        rooms: Dict[str, HaveSentRoom[str]] = {}
+        receipts: Dict[str, HaveSentRoom[str]] = {}
+        account_data: Dict[str, HaveSentRoom[str]] = {}
+
+        receipt_rows = self.db_pool.simple_select_list_txn(
+            txn,
+            table="sliding_sync_connection_streams",
+            keyvalues={"connection_position": connection_position},
+            retcols=(
+                "stream",
+                "room_id",
+                "room_status",
+                "last_token",
+            ),
+        )
+        for stream, room_id, room_status, last_token in receipt_rows:
+            have_sent_room: HaveSentRoom[str] = HaveSentRoom(
+                status=HaveSentRoomFlag(room_status), last_token=last_token
+            )
+            if stream == "rooms":
+                rooms[room_id] = have_sent_room
+            elif stream == "receipts":
+                receipts[room_id] = have_sent_room
+            elif stream == "account_data":
+                account_data[room_id] = have_sent_room
+            else:
+                # For forwards compatibility we ignore unknown streams, as in
+                # future we want to be able to easily add more stream types.
+                logger.warning("Unrecognized sliding sync stream in DB %r", stream)
+
+        return PerConnectionStateDB(
+            rooms=RoomStatusMap(rooms),
+            receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
+            room_configs=room_configs,
+        )
+
+
+@attr.s(auto_attribs=True, frozen=True)
+class PerConnectionStateDB:
+    """An equivalent to `PerConnectionState` that holds data in a format stored
+    in the DB.
+
+    The principal difference is that the tokens for the different streams are
+    serialized to strings.
+
+    When persisting, this *only* contains updates to the state.
+    """
+
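+    # For example (illustrative values only), a `rooms` entry might hold
+    # last_token="s123" (a serialized `RoomStreamToken`) while an `account_data`
+    # entry holds last_token="42" (a plain stream position); see
+    # `from_state`/`to_state` below.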
+    rooms: "RoomStatusMap[str]"
+    receipts: "RoomStatusMap[str]"
+    account_data: "RoomStatusMap[str]"
+
+    room_configs: Mapping[str, "RoomSyncConfig"]
+
+    @staticmethod
+    async def from_state(
+        per_connection_state: "MutablePerConnectionState", store: "DataStore"
+    ) -> "PerConnectionStateDB":
+        """Convert from a standard `PerConnectionState`"""
+        rooms = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    await status.last_token.to_string(store)
+                    if status.last_token is not None
+                    else None
+                ),
+            )
+            for room_id, status in per_connection_state.rooms.get_updates().items()
+        }
+
+        receipts = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    await status.last_token.to_string(store)
+                    if status.last_token is not None
+                    else None
+                ),
+            )
+            for room_id, status in per_connection_state.receipts.get_updates().items()
+        }
+
+        account_data = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    str(status.last_token) if status.last_token is not None else None
+                ),
+            )
+            for room_id, status in per_connection_state.account_data.get_updates().items()
+        }
+
+        log_kv(
+            {
+                "rooms": rooms,
+                "receipts": receipts,
+                "account_data": account_data,
+                "room_configs": per_connection_state.room_configs.maps[0],
+            }
+        )
+
+        return PerConnectionStateDB(
+            rooms=RoomStatusMap(rooms),
+            receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
+            room_configs=per_connection_state.room_configs.maps[0],
+        )
+
+    async def to_state(self, store: "DataStore") -> "PerConnectionState":
+        """Convert into a standard `PerConnectionState`"""
+        rooms = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    await RoomStreamToken.parse(store, status.last_token)
+                    if status.last_token is not None
+                    else None
+                ),
+            )
+            for room_id, status in self.rooms._statuses.items()
+        }
+
+        receipts = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    await MultiWriterStreamToken.parse(store, status.last_token)
+                    if status.last_token is not None
+                    else None
+                ),
+            )
+            for room_id, status in self.receipts._statuses.items()
+        }
+
+        account_data = {
+            room_id: HaveSentRoom(
+                status=status.status,
+                last_token=(
+                    int(status.last_token) if status.last_token is not None else None
+                ),
+            )
+            for room_id, status in self.account_data._statuses.items()
+        }
+
+        return PerConnectionState(
+            rooms=RoomStatusMap(rooms),
+            receipts=RoomStatusMap(receipts),
+            account_data=RoomStatusMap(account_data),
+            room_configs=self.room_configs,
+        )
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index b2a67aff89..60312d770d 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -30,6 +30,7 @@ from typing import (
     Iterable,
     List,
     Mapping,
+    MutableMapping,
     Optional,
     Set,
     Tuple,
@@ -41,7 +42,7 @@ from typing import (
 
 import attr
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
@@ -72,10 +73,18 @@ logger = logging.getLogger(__name__)
 
 _T = TypeVar("_T")
 
-
 MAX_STATE_DELTA_HOPS = 100
 
 
+# Freeze so it's immutable and we can use it as a cache value
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class Sentinel:
+    pass
+
+
+ROOM_UNKNOWN_SENTINEL = Sentinel()
+
+
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class EventMetadata:
     """Returned by `get_metadata_for_events`"""
@@ -298,6 +307,210 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         create_event = await self.get_event(create_id)
         return create_event
 
+    @cached(max_entries=10000)
+    async def get_room_type(self, room_id: str) -> Union[Optional[str], Sentinel]:
+        """Fetch room type for given room.
+
+        Since this function is cached, any missing values would be cached as
+        `None`. In order to distinguish between a room that legitimately has a
+        `None` room type and a room that is unknown to the server (where we
+        might want to omit the value, which would also be cached as `None`),
+        we instead use the sentinel value `ROOM_UNKNOWN_SENTINEL`.
+        """
+
+        try:
+            create_event = await self.get_create_event_for_room(room_id)
+            return create_event.content.get(EventContentFields.ROOM_TYPE)
+        except NotFoundError:
+            # We use the sentinel value to distinguish between `None` which is a
+            # valid room type and a room that is unknown to the server so the value
+            # is just unset.
+            return ROOM_UNKNOWN_SENTINEL
+
+    @cachedList(cached_method_name="get_room_type", list_name="room_ids")
+    async def bulk_get_room_type(
+        self, room_ids: Set[str]
+    ) -> Mapping[str, Union[Optional[str], Sentinel]]:
+        """
+        Bulk fetch room types for the given rooms (via current state).
+
+        Since this function is cached, any missing values would be cached as `None`. In
+        order to distinguish between a room that legitimately has a `None` room type and
+        a room that is unknown to the server where we might want to omit the value
+        (which would also be cached as `None`), we instead use the sentinel value
+        `ROOM_UNKNOWN_SENTINEL`.
+
+        Returns:
+            A mapping from room ID to the room's type (`None` is a valid room type).
+            Rooms unknown to this server will return `ROOM_UNKNOWN_SENTINEL`.
+        """
+
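+        # For example (illustrative values only), a result might look like:
+        #     {
+        #         "!space:example.org": "m.space",
+        #         "!dm:example.org": None,  # an ordinary room
+        #         "!gone:example.org": ROOM_UNKNOWN_SENTINEL,  # not known locally
+        #     }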
+        def txn(
+            txn: LoggingTransaction,
+        ) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "room_id", room_ids
+            )
+
+            # We can't rely on `room_stats_state.room_type` if the server has left the
+            # room, because the `room_id` will still be in the table but everything will
+            # be set to `None`, and `None` is a valid room type value. We join against
+            # the `room_stats_current` table which keeps track of the
+            # `current_state_events` count (and a proxy value `local_users_in_room`
+            # which can be used to assume the server is participating in the room and
+            # has current state) to ensure that the data in `room_stats_state` is
+            # up-to-date with the current state.
+            #
+            # FIXME: Use `room_stats_current.current_state_events` instead of
+            # `room_stats_current.local_users_in_room` once
+            # https://github.com/element-hq/synapse/issues/17457 is fixed.
+            sql = f"""
+                SELECT room_id, room_type
+                FROM room_stats_state
+                INNER JOIN room_stats_current USING (room_id)
+                WHERE
+                    {clause}
+                    AND local_users_in_room > 0
+            """
+
+            txn.execute(sql, args)
+
+            room_id_to_type_map = {}
+            for row in txn:
+                room_id_to_type_map[row[0]] = row[1]
+
+            return room_id_to_type_map
+
+        results = await self.db_pool.runInteraction(
+            "bulk_get_room_type",
+            txn,
+        )
+
+        # If we haven't updated `room_stats_state` with the room yet, query the
+        # create events directly. This should happen only rarely so we don't
+        # mind if we do this in a loop.
+        for room_id in room_ids - results.keys():
+            try:
+                create_event = await self.get_create_event_for_room(room_id)
+                room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
+                results[room_id] = room_type
+            except NotFoundError:
+                # We use the sentinel value to distinguish between `None` which is a
+                # valid room type and a room that is unknown to the server so the value
+                # is just unset.
+                results[room_id] = ROOM_UNKNOWN_SENTINEL
+
+        return results
+
+    @cached(max_entries=10000)
+    async def get_room_encryption(self, room_id: str) -> Optional[str]:
+        raise NotImplementedError()
+
+    @cachedList(cached_method_name="get_room_encryption", list_name="room_ids")
+    async def bulk_get_room_encryption(
+        self, room_ids: Set[str]
+    ) -> Mapping[str, Union[Optional[str], Sentinel]]:
+        """
+        Bulk fetch room encryption for the given rooms (via current state).
+
+        Since this function is cached, any missing values would be cached as `None`. In
+        order to distinguish between an unencrypted room that has `None` encryption and
+        a room that is unknown to the server where we might want to omit the value
+        (which would make it cached as `None`), instead we use the sentinel value
+        `ROOM_UNKNOWN_SENTINEL`.
+
+        Returns:
+            A mapping from room ID to the room's encryption algorithm if the room is
+            encrypted, otherwise `None`. Rooms unknown to this server will return
+            `ROOM_UNKNOWN_SENTINEL`.
+        """
+
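+        # For example (illustrative values only), a result might look like:
+        #     {
+        #         "!e2ee:example.org": "m.megolm.v1.aes-sha2",
+        #         "!plain:example.org": None,  # unencrypted
+        #         "!gone:example.org": ROOM_UNKNOWN_SENTINEL,  # not known locally
+        #     }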
+        def txn(
+            txn: LoggingTransaction,
+        ) -> MutableMapping[str, Union[Optional[str], Sentinel]]:
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "room_id", room_ids
+            )
+
+            # We can't rely on `room_stats_state.encryption` if the server has left the
+            # room, because the `room_id` will still be in the table but everything will
+            # be set to `None`, and `None` is a valid encryption value. We join against
+            # the `room_stats_current` table which keeps track of the
+            # `current_state_events` count (and a proxy value `local_users_in_room`
+            # which can be used to assume the server is participating in the room and
+            # has current state) to ensure that the data in `room_stats_state` is
+            # up-to-date with the current state.
+            #
+            # FIXME: Use `room_stats_current.current_state_events` instead of
+            # `room_stats_current.local_users_in_room` once
+            # https://github.com/element-hq/synapse/issues/17457 is fixed.
+            sql = f"""
+                SELECT room_id, encryption
+                FROM room_stats_state
+                INNER JOIN room_stats_current USING (room_id)
+                WHERE
+                    {clause}
+                    AND local_users_in_room > 0
+            """
+
+            txn.execute(sql, args)
+
+            room_id_to_encryption_map = {}
+            for row in txn:
+                room_id_to_encryption_map[row[0]] = row[1]
+
+            return room_id_to_encryption_map
+
+        results = await self.db_pool.runInteraction(
+            "bulk_get_room_encryption",
+            txn,
+        )
+
+        # If we haven't updated `room_stats_state` with the room yet, query the state
+        # directly. This should happen only rarely so we don't mind if we do this in a
+        # loop.
+        encryption_event_ids: List[str] = []
+        for room_id in room_ids - results.keys():
+            state_map = await self.get_partial_filtered_current_state_ids(
+                room_id,
+                state_filter=StateFilter.from_types(
+                    [
+                        (EventTypes.Create, ""),
+                        (EventTypes.RoomEncryption, ""),
+                    ]
+                ),
+            )
+            # We can use the create event as a canary to tell whether the server has
+            # seen the room before
+            create_event_id = state_map.get((EventTypes.Create, ""))
+            encryption_event_id = state_map.get((EventTypes.RoomEncryption, ""))
+
+            if create_event_id is None:
+                # We use the sentinel value to distinguish between `None`, which is a
+                # valid encryption value, and a room that is unknown to the server so
+                # the value is just unset.
+                results[room_id] = ROOM_UNKNOWN_SENTINEL
+                continue
+
+            if encryption_event_id is None:
+                results[room_id] = None
+            else:
+                encryption_event_ids.append(encryption_event_id)
+
+        encryption_event_map = await self.get_events(encryption_event_ids)
+
+        for encryption_event_id in encryption_event_ids:
+            encryption_event = encryption_event_map.get(encryption_event_id)
+            # If the current state says there is an encryption event, we should have it
+            # in the database.
+            assert encryption_event is not None
+
+            results[encryption_event.room_id] = encryption_event.content.get(
+                EventContentFields.ENCRYPTION_ALGORITHM
+            )
+
+        return results
+
     @cached(max_entries=100000, iterable=True)
     async def get_partial_current_state_ids(self, room_id: str) -> StateMap[str]:
         """Get the current state event ids for a room based on the
@@ -338,7 +551,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             desc="check_if_events_in_current_state",
         )
 
-        return frozenset(event_id for event_id, in rows)
+        return frozenset(event_id for (event_id,) in rows)
 
     # FIXME: how should this be cached?
     @cancellable
@@ -539,6 +752,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
     DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
+    MEMBERS_CURRENT_STATE_UPDATE_NAME = "current_state_events_members_room_index"
 
     def __init__(
         self,
@@ -567,6 +781,13 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             self.DELETE_CURRENT_STATE_UPDATE_NAME,
             self._background_remove_left_rooms,
         )
+        self.db_pool.updates.register_background_index_update(
+            self.MEMBERS_CURRENT_STATE_UPDATE_NAME,
+            index_name="current_state_events_members_room_index",
+            table="current_state_events",
+            columns=["room_id", "membership"],
+            where_clause="type='m.room.member'",
+        )
 
     async def _background_remove_left_rooms(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index 036972ac25..ba52fff652 100644
--- a/synapse/storage/databases/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
@@ -24,9 +24,13 @@ from typing import List, Optional, Tuple
 
 import attr
 
+from synapse.logging.opentracing import trace
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import LoggingTransaction
+from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
+from synapse.storage.databases.main.stream import _filter_results_by_stream
+from synapse.types import RoomStreamToken, StrCollection
 from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
@@ -156,3 +160,138 @@ class StateDeltasStore(SQLBaseStore):
             "get_max_stream_id_in_current_state_deltas",
             self._get_max_stream_id_in_current_state_deltas_txn,
         )
+
+    def get_current_state_deltas_for_room_txn(
+        self,
+        txn: LoggingTransaction,
+        room_id: str,
+        *,
+        from_token: Optional[RoomStreamToken],
+        to_token: Optional[RoomStreamToken],
+    ) -> List[StateDelta]:
+        """
+        Get the state deltas between two tokens.
+
+        (> `from_token` and <= `to_token`)
+        """
+        from_clause = ""
+        from_args = []
+        if from_token is not None:
+            from_clause = "AND ? < stream_id"
+            from_args = [from_token.stream]
+
+        to_clause = ""
+        to_args = []
+        if to_token is not None:
+            to_clause = "AND stream_id <= ?"
+            to_args = [to_token.get_max_stream_pos()]
+
+        sql = f"""
+                SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id
+                FROM current_state_delta_stream
+                WHERE room_id = ? {from_clause} {to_clause}
+                ORDER BY stream_id ASC
+            """
+        txn.execute(sql, [room_id] + from_args + to_args)
+
+        return [
+            StateDelta(
+                stream_id=row[1],
+                room_id=room_id,
+                event_type=row[2],
+                state_key=row[3],
+                event_id=row[4],
+                prev_event_id=row[5],
+            )
+            for row in txn
+            if _filter_results_by_stream(from_token, to_token, row[0], row[1])
+        ]
+
+    @trace
+    async def get_current_state_deltas_for_room(
+        self,
+        room_id: str,
+        *,
+        from_token: Optional[RoomStreamToken],
+        to_token: Optional[RoomStreamToken],
+    ) -> List[StateDelta]:
+        """
+        Get the state deltas between two tokens.
+
+        (> `from_token` and <= `to_token`)
+        """
+
+        if (
+            from_token is not None
+            and not self._curr_state_delta_stream_cache.has_entity_changed(
+                room_id, from_token.stream
+            )
+        ):
+            return []
+
+        return await self.db_pool.runInteraction(
+            "get_current_state_deltas_for_room",
+            self.get_current_state_deltas_for_room_txn,
+            room_id,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
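+    # Rough usage sketch (hypothetical caller, not part of this change): callers
+    # typically pass the previous sync position as `from_token` and the current
+    # position as `to_token`, e.g.
+    #     deltas = await store.get_current_state_deltas_for_room(
+    #         room_id, from_token=since_token.room_key, to_token=now_token.room_key,
+    #     )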
+    @trace
+    async def get_current_state_deltas_for_rooms(
+        self,
+        room_ids: StrCollection,
+        from_token: RoomStreamToken,
+        to_token: RoomStreamToken,
+    ) -> List[StateDelta]:
+        """Get the state deltas between two tokens for the set of rooms."""
+
+        room_ids = self._curr_state_delta_stream_cache.get_entities_changed(
+            room_ids, from_token.stream
+        )
+        if not room_ids:
+            return []
+
+        def get_current_state_deltas_for_rooms_txn(
+            txn: LoggingTransaction,
+            room_ids: StrCollection,
+        ) -> List[StateDelta]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", room_ids
+            )
+
+            sql = f"""
+                SELECT instance_name, stream_id, room_id, type, state_key, event_id, prev_event_id
+                FROM current_state_delta_stream
+                WHERE {clause} AND ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+            """
+            args.append(from_token.stream)
+            args.append(to_token.get_max_stream_pos())
+
+            txn.execute(sql, args)
+
+            return [
+                StateDelta(
+                    stream_id=row[1],
+                    room_id=row[2],
+                    event_type=row[3],
+                    state_key=row[4],
+                    event_id=row[5],
+                    prev_event_id=row[6],
+                )
+                for row in txn
+                if _filter_results_by_stream(from_token, to_token, row[0], row[1])
+            ]
+
+        results = []
+        for batch in batch_iter(room_ids, 1000):
+            deltas = await self.db_pool.runInteraction(
+                "get_current_state_deltas_for_rooms",
+                get_current_state_deltas_for_rooms_txn,
+                batch,
+            )
+
+            results.extend(deltas)
+
+        return results
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index e9f6a918c7..79c49e7fd9 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -161,7 +161,7 @@ class StatsStore(StateDeltasStore):
                     LIMIT ?
                 """
             txn.execute(sql, (last_user_id, batch_size))
-            return [r for r, in txn]
+            return [r for (r,) in txn]
 
         users_to_work_on = await self.db_pool.runInteraction(
             "_populate_stats_process_users", _get_next_batch
@@ -207,7 +207,7 @@ class StatsStore(StateDeltasStore):
                     LIMIT ?
                 """
             txn.execute(sql, (last_room_id, batch_size))
-            return [r for r, in txn]
+            return [r for (r,) in txn]
 
         rooms_to_work_on = await self.db_pool.runInteraction(
             "populate_stats_rooms_get_batch", _get_next_batch
@@ -751,9 +751,7 @@ class StatsStore(StateDeltasStore):
                 LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id
                 {}
                 GROUP BY lmr.user_id, displayname
-            """.format(
-                where_clause
-            )
+            """.format(where_clause)
 
             # SQLite does not support SELECT COUNT(*) OVER()
             sql = """
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index d34376b8df..b4258a4436 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -21,7 +21,7 @@
 #
 #
 
-""" This module is responsible for getting events from the DB for pagination
+"""This module is responsible for getting events from the DB for pagination
 and event streaming.
 
 The order it returns events in depend on whether we are streaming forwards or
@@ -50,7 +50,9 @@ from typing import (
     Dict,
     Iterable,
     List,
+    Mapping,
     Optional,
+    Protocol,
     Set,
     Tuple,
     cast,
@@ -59,7 +61,7 @@ from typing import (
 
 import attr
 from immutabledict import immutabledict
-from typing_extensions import Literal
+from typing_extensions import Literal, assert_never
 
 from twisted.internet import defer
 
@@ -67,7 +69,7 @@ from synapse.api.constants import Direction, EventTypes, Membership
 from synapse.api.filtering import Filter
 from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.logging.opentracing import trace
+from synapse.logging.opentracing import tag_args, trace
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
     DatabasePool,
@@ -78,10 +80,11 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
-from synapse.types import PersistedEventPosition, RoomStreamToken
-from synapse.util.caches.descriptors import cached
+from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
+from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.cancellation import cancellable
+from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -96,6 +99,18 @@ _STREAM_TOKEN = "stream"
 _TOPOLOGICAL_TOKEN = "topological"
 
 
+class PaginateFunction(Protocol):
+    async def __call__(
+        self,
+        *,
+        room_id: str,
+        from_key: RoomStreamToken,
+        to_key: Optional[RoomStreamToken] = None,
+        direction: Direction = Direction.BACKWARDS,
+        limit: int = 0,
+    ) -> Tuple[List[EventBase], RoomStreamToken, bool]: ...
+
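+# For example, `paginate_room_events_by_stream_ordering` below matches this protocol's
+# call shape, which lets callers treat different pagination implementations
+# interchangeably.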
+
 # Used as return values for pagination APIs
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class _EventDictReturn:
@@ -279,7 +294,7 @@ def generate_pagination_bounds(
 
 
 def generate_next_token(
-    direction: Direction, last_topo_ordering: int, last_stream_ordering: int
+    direction: Direction, last_topo_ordering: Optional[int], last_stream_ordering: int
 ) -> RoomStreamToken:
     """
     Generate the next room stream token based on the currently returned data.
@@ -371,7 +386,7 @@ def _make_generic_sql_bound(
 def _filter_results(
     lower_token: Optional[RoomStreamToken],
     upper_token: Optional[RoomStreamToken],
-    instance_name: str,
+    instance_name: Optional[str],
     topological_ordering: int,
     stream_ordering: int,
 ) -> bool:
@@ -384,8 +399,14 @@ def _filter_results(
     position maps, which we handle by fetching more than necessary from the DB
     and then filtering (rather than attempting to construct a complicated SQL
     query).
+
+    The `instance_name` arg is optional to handle historic rows, and is
+    interpreted as if it were "master".
     """
 
+    if instance_name is None:
+        instance_name = "master"
+
     event_historical_tuple = (
         topological_ordering,
         stream_ordering,
@@ -420,7 +441,7 @@ def _filter_results(
 def _filter_results_by_stream(
     lower_token: Optional[RoomStreamToken],
     upper_token: Optional[RoomStreamToken],
-    instance_name: str,
+    instance_name: Optional[str],
     stream_ordering: int,
 ) -> bool:
     """
@@ -436,7 +457,13 @@ def _filter_results_by_stream(
     position maps, which we handle by fetching more than necessary from the DB
     and then filtering (rather than attempting to construct a complicated SQL
     query).
+
+    The `instance_name` arg is optional to handle historic rows, and is
+    interpreted as if it were "master".
     """
+    if instance_name is None:
+        instance_name = "master"
+
     if lower_token:
         assert lower_token.topological is None
 
@@ -646,33 +673,44 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     async def get_room_events_stream_for_rooms(
         self,
+        *,
         room_ids: Collection[str],
         from_key: RoomStreamToken,
-        to_key: RoomStreamToken,
+        to_key: Optional[RoomStreamToken] = None,
+        direction: Direction = Direction.BACKWARDS,
         limit: int = 0,
-        order: str = "DESC",
-    ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]:
+    ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken, bool]]:
         """Get new room events in stream ordering since `from_key`.
 
         Args:
             room_ids
-            from_key: Token from which no events are returned before
-            to_key: Token from which no events are returned after. (This
-                is typically the current stream token)
+            from_key: The token to stream from (starting point and heading in the given
+                direction)
+            to_key: The token representing the end stream position (end point)
             limit: Maximum number of events to return
-            order: Either "DESC" or "ASC". Determines which events are
-                returned when the result is limited. If "DESC" then the most
-                recent `limit` events are returned, otherwise returns the
-                oldest `limit` events.
+            direction: Indicates whether we are paginating forwards or backwards
+                from `from_key`.
 
         Returns:
             A map from room id to a tuple containing:
                 - list of recent events in the room
                 - stream ordering key for the start of the chunk of events returned.
+                - a boolean to indicate if there were more events but we hit the limit
+
+            When Direction.FORWARDS: from_key < x <= to_key (ascending order)
+            When Direction.BACKWARDS: from_key >= x > to_key (descending order)
         """
-        room_ids = self._events_stream_cache.get_entities_changed(
-            room_ids, from_key.stream
-        )
+        if direction == Direction.FORWARDS:
+            room_ids = self._events_stream_cache.get_entities_changed(
+                room_ids, from_key.stream
+            )
+        elif direction == Direction.BACKWARDS:
+            if to_key is not None:
+                room_ids = self._events_stream_cache.get_entities_changed(
+                    room_ids, to_key.stream
+                )
+        else:
+            assert_never(direction)
 
         if not room_ids:
             return {}
@@ -684,12 +722,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 defer.gatherResults(
                     [
                         run_in_background(
-                            self.get_room_events_stream_for_room,
-                            room_id,
-                            from_key,
-                            to_key,
-                            limit,
-                            order=order,
+                            self.paginate_room_events_by_stream_ordering,
+                            room_id=room_id,
+                            from_key=from_key,
+                            to_key=to_key,
+                            direction=direction,
+                            limit=limit,
                         )
                         for room_id in rm_ids
                     ],
@@ -713,91 +751,200 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             if self._events_stream_cache.has_entity_changed(room_id, from_id)
         }
 
-    async def get_room_events_stream_for_room(
+    async def get_rooms_that_have_updates_since_sliding_sync_table(
         self,
+        room_ids: StrCollection,
+        from_key: RoomStreamToken,
+    ) -> StrCollection:
+        """Return the rooms that probably have had updates since the given
+        token (changes that are > `from_key`)."""
+        # If the stream change cache is valid for the stream token, we can just
+        # use the result of that.
+        if from_key.stream >= self._events_stream_cache.get_earliest_known_position():
+            return self._events_stream_cache.get_entities_changed(
+                room_ids, from_key.stream
+            )
+
+        def get_rooms_that_have_updates_since_sliding_sync_table_txn(
+            txn: LoggingTransaction,
+        ) -> StrCollection:
+            sql = """
+                SELECT room_id
+                FROM sliding_sync_joined_rooms
+                WHERE {clause}
+                    AND event_stream_ordering > ?
+            """
+
+            results: Set[str] = set()
+            for batch in batch_iter(room_ids, 1000):
+                clause, args = make_in_list_sql_clause(
+                    self.database_engine, "room_id", batch
+                )
+
+                args.append(from_key.stream)
+                txn.execute(sql.format(clause=clause), args)
+
+                results.update(row[0] for row in txn)
+
+            return results
+
+        return await self.db_pool.runInteraction(
+            "get_rooms_that_have_updates_since_sliding_sync_table",
+            get_rooms_that_have_updates_since_sliding_sync_table_txn,
+        )
+
+    async def paginate_room_events_by_stream_ordering(
+        self,
+        *,
         room_id: str,
         from_key: RoomStreamToken,
-        to_key: RoomStreamToken,
+        to_key: Optional[RoomStreamToken] = None,
+        direction: Direction = Direction.BACKWARDS,
         limit: int = 0,
-        order: str = "DESC",
-    ) -> Tuple[List[EventBase], RoomStreamToken]:
-        """Get new room events in stream ordering since `from_key`.
+    ) -> Tuple[List[EventBase], RoomStreamToken, bool]:
+        """
+        Paginate events by `stream_ordering` in the room from the `from_key` in the
+        given `direction` to the `to_key` or `limit`.
 
         Args:
             room_id
-            from_key: Token from which no events are returned before
-            to_key: Token from which no events are returned after. (This
-                is typically the current stream token)
+            from_key: The token to stream from (starting point and heading in the given
+                direction)
+            to_key: The token representing the end stream position (end point)
+            direction: Indicates whether we are paginating forwards or backwards
+                from `from_key`.
             limit: Maximum number of events to return
-            order: Either "DESC" or "ASC". Determines which events are
-                returned when the result is limited. If "DESC" then the most
-                recent `limit` events are returned, otherwise returns the
-                oldest `limit` events.
 
         Returns:
-            The list of events (in ascending stream order) and the token from the start
-            of the chunk of events returned.
-        """
-        if from_key == to_key:
-            return [], from_key
+            The results as a list of events, a token that points to the end of
+            the result set, and a boolean to indicate if there were more events
+            but we hit the limit. If no events are returned then the end of the
+            stream has been reached (i.e. there are no events between `from_key`
+            and `to_key`).
 
-        has_changed = self._events_stream_cache.has_entity_changed(
-            room_id, from_key.stream
-        )
+            When Direction.FORWARDS: from_key < x <= to_key (ascending order)
+            When Direction.BACKWARDS: from_key >= x > to_key (descending order)
+        """
+
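+        # Illustrative only: paginating BACKWARDS from the live position with limit=20
+        # returns up to the 20 most recent events (newest first), a token from which the
+        # next backwards request can continue, and `limited=True` if we hit the limit
+        # before exhausting the range down to `to_key`.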
+        # FIXME: When going forwards, we should enforce that the `to_key` is not `None`
+        # because we always need an upper bound when querying the events stream (as
+        # otherwise we'll potentially pick up events that are not fully persisted).
+
+        # We should only be working with `stream_ordering` tokens here
+        assert from_key is None or from_key.topological is None
+        assert to_key is None or to_key.topological is None
+
+        # We can bail early if we're looking forwards, and our `to_key` is already
+        # before our `from_key`.
+        if (
+            direction == Direction.FORWARDS
+            and to_key is not None
+            and to_key.is_before_or_eq(from_key)
+        ):
+            # Token selection matches what we do below if there are no rows
+            return [], to_key if to_key else from_key, False
+        # Or vice-versa, if we're looking backwards and our `from_key` is already before
+        # our `to_key`.
+        elif (
+            direction == Direction.BACKWARDS
+            and to_key is not None
+            and from_key.is_before_or_eq(to_key)
+        ):
+            # Token selection matches what we do below if there are no rows
+            return [], to_key if to_key else from_key, False
+
+        # We can do a quick sanity check to see if any events have been sent in the room
+        # since the earlier token.
+        has_changed = True
+        if direction == Direction.FORWARDS:
+            has_changed = self._events_stream_cache.has_entity_changed(
+                room_id, from_key.stream
+            )
+        elif direction == Direction.BACKWARDS:
+            if to_key is not None:
+                has_changed = self._events_stream_cache.has_entity_changed(
+                    room_id, to_key.stream
+                )
+        else:
+            assert_never(direction)
 
         if not has_changed:
-            return [], from_key
+            # Token selection matches what we do below if there are no rows
+            return [], to_key if to_key else from_key, False
 
-        def f(txn: LoggingTransaction) -> List[_EventDictReturn]:
-            # To handle tokens with a non-empty instance_map we fetch more
-            # results than necessary and then filter down
-            min_from_id = from_key.stream
-            max_to_id = to_key.get_max_stream_pos()
+        order, from_bound, to_bound = generate_pagination_bounds(
+            direction, from_key, to_key
+        )
 
-            sql = """
-                SELECT event_id, instance_name, topological_ordering, stream_ordering
+        bounds = generate_pagination_where_clause(
+            direction=direction,
+            # The empty string will shortcut downstream code to only use the
+            # `stream_ordering` column
+            column_names=("", "stream_ordering"),
+            from_token=from_bound,
+            to_token=to_bound,
+            engine=self.database_engine,
+        )
+
+        def f(txn: LoggingTransaction) -> Tuple[List[_EventDictReturn], bool]:
+            sql = f"""
+                SELECT event_id, instance_name, stream_ordering
                 FROM events
                 WHERE
                     room_id = ?
                     AND not outlier
-                    AND stream_ordering > ? AND stream_ordering <= ?
-                ORDER BY stream_ordering %s LIMIT ?
-            """ % (
-                order,
-            )
-            txn.execute(sql, (room_id, min_from_id, max_to_id, 2 * limit))
+                    AND {bounds}
+                ORDER BY stream_ordering {order} LIMIT ?
+            """
+            txn.execute(sql, (room_id, 2 * limit))
+
+            # Get all the rows and check if we hit the limit.
+            fetched_rows = txn.fetchall()
+            limited = len(fetched_rows) >= 2 * limit
 
             rows = [
                 _EventDictReturn(event_id, None, stream_ordering)
-                for event_id, instance_name, topological_ordering, stream_ordering in txn
-                if _filter_results(
-                    from_key,
-                    to_key,
-                    instance_name,
-                    topological_ordering,
-                    stream_ordering,
+                for event_id, instance_name, stream_ordering in fetched_rows
+                if _filter_results_by_stream(
+                    lower_token=(
+                        to_key if direction == Direction.BACKWARDS else from_key
+                    ),
+                    upper_token=(
+                        from_key if direction == Direction.BACKWARDS else to_key
+                    ),
+                    instance_name=instance_name,
+                    stream_ordering=stream_ordering,
                 )
-            ][:limit]
-            return rows
+            ]
 
-        rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f)
+            if len(rows) > limit:
+                limited = True
+
+            rows = rows[:limit]
+            return rows, limited
+
+        rows, limited = await self.db_pool.runInteraction(
+            "get_room_events_stream_for_room", f
+        )
 
         ret = await self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
         )
 
-        if order.lower() == "desc":
-            ret.reverse()
-
         if rows:
-            key = RoomStreamToken(stream=min(r.stream_ordering for r in rows))
+            next_key = generate_next_token(
+                direction=direction,
+                last_topo_ordering=None,
+                last_stream_ordering=rows[-1].stream_ordering,
+            )
         else:
-            # Assume we didn't get anything because there was nothing to
-            # get.
-            key = from_key
+            # TODO (erikj): We should work out what to do here instead. (same as
+            # `_paginate_room_events_by_topological_ordering_txn(...)`)
+            next_key = to_key if to_key else from_key
 
-        return ret, key
+        return ret, next_key, limited
 
+    @trace
     async def get_current_state_delta_membership_changes_for_user(
         self,
         user_id: str,
@@ -836,6 +983,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Returns:
             All membership changes to the current state in the token range. Events are
             sorted by `stream_ordering` ascending.
+
+            `event_id`/`sender` can be `None` when the server leaves a room (meaning
+            everyone locally left) or when a state reset removed the person from the
+            room. We can't tell the difference between the two cases with what's
+            available in the `current_state_delta_stream` table. To actually check for a
+            state reset, you need to check if a membership still exists in the room.
         """
         # Start by ruling out cases where a DB query is not necessary.
         if from_key == to_key:
@@ -912,7 +1065,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 prev_sender,
             ) in txn:
                 assert room_id is not None
-                assert instance_name is not None
                 assert stream_ordering is not None
 
                 if _filter_results_by_stream(
@@ -936,7 +1088,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                         # Event
                         event_id=event_id,
                         event_pos=PersistedEventPosition(
-                            instance_name=instance_name,
+                            # If instance_name is null we default to "master"
+                            instance_name=instance_name or "master",
                             stream=stream_ordering,
                         ),
                         # When `s.event_id = null`, we won't be able to get respective
@@ -947,18 +1100,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                         membership=(
                             membership if membership is not None else Membership.LEAVE
                         ),
+                        # This will also be null for the same reasons if `s.event_id = null`
                         sender=sender,
                         # Prev event
                         prev_event_id=prev_event_id,
                         prev_event_pos=(
                             PersistedEventPosition(
-                                instance_name=prev_instance_name,
+                                # If instance_name is null we default to "master"
+                                instance_name=prev_instance_name or "master",
                                 stream=prev_stream_ordering,
                             )
-                            if (
-                                prev_instance_name is not None
-                                and prev_stream_ordering is not None
-                            )
+                            if (prev_stream_ordering is not None)
                             else None
                         ),
                         prev_membership=prev_membership,
@@ -1032,9 +1184,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     AND e.stream_ordering > ? AND e.stream_ordering <= ?
                     %s
                 ORDER BY e.stream_ordering ASC
-            """ % (
-                ignore_room_clause,
-            )
+            """ % (ignore_room_clause,)
 
             txn.execute(sql, args)
 
@@ -1103,9 +1253,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         if limit == 0:
             return [], end_token
 
-        rows, token = await self.db_pool.runInteraction(
+        rows, token, _ = await self.db_pool.runInteraction(
             "get_recent_event_ids_for_room",
-            self._paginate_room_events_txn,
+            self._paginate_room_events_by_topological_ordering_txn,
             room_id,
             from_token=end_token,
             limit=limit,
@@ -1174,10 +1324,76 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return None
 
+    async def get_last_event_pos_in_room(
+        self,
+        room_id: str,
+        event_types: Optional[StrCollection] = None,
+    ) -> Optional[Tuple[str, PersistedEventPosition]]:
+        """
+        Returns the ID and event position of the last event in a room.
+
+        Based on `get_last_event_pos_in_room_before_stream_ordering(...)`
+
+        Args:
+            room_id
+            event_types: Optional allowlist of event types to filter by
+
+        Returns:
+            The ID of the most recent event and its position, or None if there are no
+            events in the room that match the given event types.
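+
+        Example (illustrative sketch; assumes a store instance and the standard
+        `EventTypes.Message` constant)::
+
+            last = await store.get_last_event_pos_in_room(
+                room_id, event_types=[EventTypes.Message]
+            )
+            if last is not None:
+                event_id, event_pos = last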
+        """
+
+        def _get_last_event_pos_in_room_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Tuple[str, PersistedEventPosition]]:
+            event_type_clause = ""
+            event_type_args: List[str] = []
+            if event_types is not None and len(event_types) > 0:
+                event_type_clause, event_type_args = make_in_list_sql_clause(
+                    txn.database_engine, "type", event_types
+                )
+                event_type_clause = f"AND {event_type_clause}"
+
+            sql = f"""
+            SELECT event_id, stream_ordering, instance_name
+            FROM events
+            LEFT JOIN rejections USING (event_id)
+            WHERE room_id = ?
+                {event_type_clause}
+                AND NOT outlier
+                AND rejections.event_id IS NULL
+            ORDER BY stream_ordering DESC
+            LIMIT 1
+            """
+
+            txn.execute(
+                sql,
+                [room_id] + event_type_args,
+            )
+
+            row = cast(Optional[Tuple[str, int, str]], txn.fetchone())
+            if row is not None:
+                event_id, stream_ordering, instance_name = row
+
+                return event_id, PersistedEventPosition(
+                    # If instance_name is null we default to "master"
+                    instance_name or "master",
+                    stream_ordering,
+                )
+
+            return None
+
+        return await self.db_pool.runInteraction(
+            "get_last_event_pos_in_room",
+            _get_last_event_pos_in_room_txn,
+        )
+
+    @trace
     async def get_last_event_pos_in_room_before_stream_ordering(
         self,
         room_id: str,
         end_token: RoomStreamToken,
+        event_types: Optional[StrCollection] = None,
     ) -> Optional[Tuple[str, PersistedEventPosition]]:
         """
         Returns the ID and event position of the last event in a room at or before a
@@ -1186,6 +1402,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Args:
             room_id
             end_token: The token used to stream from
+            event_types: Optional allowlist of event types to filter by
 
         Returns:
             The ID of the most recent event and it's position, or None if there are no
@@ -1207,9 +1424,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             min_stream = end_token.stream
             max_stream = end_token.get_max_stream_pos()
 
-            # We use `union all` because we don't need any of the deduplication logic
-            # (`union` is really a union + distinct). `UNION ALL` does preserve the
-            # ordering of the operand queries but there is no actual gurantee that it
+            event_type_clause = ""
+            event_type_args: List[str] = []
+            if event_types is not None and len(event_types) > 0:
+                event_type_clause, event_type_args = make_in_list_sql_clause(
+                    txn.database_engine, "type", event_types
+                )
+                event_type_clause = f"AND {event_type_clause}"
+
+            # We use `UNION ALL` because we don't need any of the deduplication logic
+            # (`UNION` is really a `UNION` + `DISTINCT`). `UNION ALL` does preserve the
+            # ordering of the operand queries but there is no actual guarantee that it
             # has this behavior in all scenarios so we need the extra `ORDER BY` at the
             # bottom.
             sql = """
@@ -1218,6 +1443,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     FROM events
                     LEFT JOIN rejections USING (event_id)
                     WHERE room_id = ?
+                        %s
                         AND ? < stream_ordering AND stream_ordering <= ?
                         AND NOT outlier
                         AND rejections.event_id IS NULL
@@ -1229,6 +1455,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     FROM events
                     LEFT JOIN rejections USING (event_id)
                     WHERE room_id = ?
+                        %s
                         AND stream_ordering <= ?
                         AND NOT outlier
                         AND rejections.event_id IS NULL
@@ -1236,16 +1463,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     LIMIT 1
                 ) AS b
                 ORDER BY stream_ordering DESC
-            """
+            """ % (
+                event_type_clause,
+                event_type_clause,
+            )
             txn.execute(
                 sql,
-                (
-                    room_id,
-                    min_stream,
-                    max_stream,
-                    room_id,
-                    min_stream,
-                ),
+                [room_id]
+                + event_type_args
+                + [min_stream, max_stream, room_id]
+                + event_type_args
+                + [min_stream],
             )
 
             for instance_name, stream_ordering, topological_ordering, event_id in txn:
@@ -1257,7 +1485,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     stream_ordering=stream_ordering,
                 ):
                     return event_id, PersistedEventPosition(
-                        instance_name, stream_ordering
+                        # If instance_name is null we default to "master"
+                        instance_name or "master",
+                        stream_ordering,
                     )
 
             return None
@@ -1267,6 +1497,158 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             get_last_event_pos_in_room_before_stream_ordering_txn,
         )
 
+    async def bulk_get_last_event_pos_in_room_before_stream_ordering(
+        self,
+        room_ids: StrCollection,
+        end_token: RoomStreamToken,
+    ) -> Dict[str, int]:
+        """Bulk fetch the stream position of the latest events in the given
+        rooms
+        """
+
+        # First we just get the latest positions for the room, as the vast
+        # majority of them will be before the given end token anyway. By doing
+        # this we can cache most rooms.
+        uncapped_results = await self._bulk_get_max_event_pos(room_ids)
+
+        # Check that the stream position for the rooms are from before the
+        # minimum position of the token. If not then we need to fetch more
+        # rows.
+        results: Dict[str, int] = {}
+        recheck_rooms: Set[str] = set()
+        min_token = end_token.stream
+        for room_id, stream in uncapped_results.items():
+            if stream is None:
+                # Despite the function not directly setting None, the cache can!
+                # See: https://github.com/element-hq/synapse/issues/17726
+                continue
+            if stream <= min_token:
+                results[room_id] = stream
+            else:
+                recheck_rooms.add(room_id)
+
+        if not recheck_rooms:
+            return results
+
+        # There shouldn't be many rooms that we need to recheck, so we do them
+        # one-by-one.
+        for room_id in recheck_rooms:
+            result = await self.get_last_event_pos_in_room_before_stream_ordering(
+                room_id, end_token
+            )
+            if result is not None:
+                results[room_id] = result[1].stream
+
+        return results
+
+    @cached()
+    async def _get_max_event_pos(self, room_id: str) -> int:
+        raise NotImplementedError()
+
+    @cachedList(cached_method_name="_get_max_event_pos", list_name="room_ids")
+    async def _bulk_get_max_event_pos(
+        self, room_ids: StrCollection
+    ) -> Mapping[str, Optional[int]]:
+        """Fetch the max position of a persisted event in the room."""
+
+        # We need to be careful not to return positions ahead of the current
+        # positions, so we get the current token now and cap our queries to it.
+        now_token = self.get_room_max_token()
+        max_pos = now_token.get_max_stream_pos()
+
+        results: Dict[str, int] = {}
+
+        # First, we check for the rooms in the stream change cache to see if we
+        # can just use the latest position from it.
+        missing_room_ids: Set[str] = set()
+        for room_id in room_ids:
+            stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id)
+            if stream_pos is not None:
+                results[room_id] = stream_pos
+            else:
+                missing_room_ids.add(room_id)
+
+        if not missing_room_ids:
+            return results
+
+        # Next, we query the stream position from the DB. At first we fetch all
+        # positions less than the *max* stream pos in the token, then filter
+        # them down. We do this as a) this is a cheaper query, and b) the vast
+        # majority of rooms will have a latest token from before the min stream
+        # pos.
+
+        def bulk_get_max_event_pos_fallback_txn(
+            txn: LoggingTransaction, batched_room_ids: StrCollection
+        ) -> Dict[str, int]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", batched_room_ids
+            )
+            sql = f"""
+                SELECT room_id, (
+                    SELECT stream_ordering FROM events AS e
+                    LEFT JOIN rejections USING (event_id)
+                    WHERE e.room_id = r.room_id
+                        AND e.stream_ordering <= ?
+                        AND NOT outlier
+                        AND rejection_reason IS NULL
+                    ORDER BY stream_ordering DESC
+                    LIMIT 1
+                )
+                FROM rooms AS r
+                WHERE {clause}
+            """
+            txn.execute(sql, [max_pos] + args)
+            return {row[0]: row[1] for row in txn}
+
+        # It's easier to look at the `sliding_sync_joined_rooms` table and avoid all of
+        # the joins and sub-queries.
+        def bulk_get_max_event_pos_from_sliding_sync_tables_txn(
+            txn: LoggingTransaction, batched_room_ids: StrCollection
+        ) -> Dict[str, int]:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "room_id", batched_room_ids
+            )
+            sql = f"""
+                SELECT room_id, event_stream_ordering
+                FROM sliding_sync_joined_rooms
+                WHERE {clause}
+                ORDER BY event_stream_ordering DESC
+            """
+            txn.execute(sql, args)
+            return {row[0]: row[1] for row in txn}
+
+        recheck_rooms: Set[str] = set()
+        for batched in batch_iter(room_ids, 1000):
+            if await self.have_finished_sliding_sync_background_jobs():
+                batch_results = await self.db_pool.runInteraction(
+                    "bulk_get_max_event_pos_from_sliding_sync_tables_txn",
+                    bulk_get_max_event_pos_from_sliding_sync_tables_txn,
+                    batched,
+                )
+            else:
+                batch_results = await self.db_pool.runInteraction(
+                    "bulk_get_max_event_pos_fallback_txn",
+                    bulk_get_max_event_pos_fallback_txn,
+                    batched,
+                )
+            for room_id, stream_ordering in batch_results.items():
+                if stream_ordering <= now_token.stream:
+                    results[room_id] = stream_ordering
+                else:
+                    recheck_rooms.add(room_id)
+
+        # We now need to handle rooms where the above query returned a stream
+        # position that was potentially too new. This should happen very rarely
+        # so we just query the rooms one-by-one.
+        for room_id in recheck_rooms:
+            result = await self.get_last_event_pos_in_room_before_stream_ordering(
+                room_id, now_token
+            )
+            if result is not None:
+                results[room_id] = result[1].stream
+
+        return results
+
     async def get_current_room_stream_token_for_room_id(
         self, room_id: str
     ) -> RoomStreamToken:
@@ -1475,7 +1857,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             topological=topological_ordering, stream=stream_ordering
         )
 
-        rows, start_token = self._paginate_room_events_txn(
+        rows, start_token, _ = self._paginate_room_events_by_topological_ordering_txn(
             txn,
             room_id,
             before_token,
@@ -1485,7 +1867,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
         events_before = [r.event_id for r in rows]
 
-        rows, end_token = self._paginate_room_events_txn(
+        rows, end_token, _ = self._paginate_room_events_by_topological_ordering_txn(
             txn,
             room_id,
             after_token,
@@ -1648,16 +2030,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
     def has_room_changed_since(self, room_id: str, stream_id: int) -> bool:
         return self._events_stream_cache.has_entity_changed(room_id, stream_id)
 
-    def _paginate_room_events_txn(
+    def _paginate_room_events_by_topological_ordering_txn(
         self,
         txn: LoggingTransaction,
         room_id: str,
         from_token: RoomStreamToken,
         to_token: Optional[RoomStreamToken] = None,
         direction: Direction = Direction.BACKWARDS,
-        limit: int = -1,
+        limit: int = 0,
         event_filter: Optional[Filter] = None,
-    ) -> Tuple[List[_EventDictReturn], RoomStreamToken]:
+    ) -> Tuple[List[_EventDictReturn], RoomStreamToken, bool]:
         """Returns list of events before or after a given token.
 
         Args:
@@ -1672,11 +2054,30 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 those that match the filter.
 
         Returns:
-            A list of _EventDictReturn and a token that points to the end of the
-            result set. If no events are returned then the end of the stream has
-            been reached (i.e. there are no events between `from_token` and
-            `to_token`), or `limit` is zero.
+            A list of _EventDictReturn, a token that points to the end of the
+            result set, and a boolean to indicate if there were more events but
+            we hit the limit.  If no events are returned then the end of the
+            stream has been reached (i.e. there are no events between
+            `from_token` and `to_token`), or `limit` is zero.
         """
+        # We can bail early if we're looking forwards, and our `to_key` is already
+        # before our `from_token`.
+        if (
+            direction == Direction.FORWARDS
+            and to_token is not None
+            and to_token.is_before_or_eq(from_token)
+        ):
+            # Token selection matches what we do below if there are no rows
+            return [], to_token if to_token else from_token, False
+        # Or vice-versa, if we're looking backwards and our `from_token` is already before
+        # our `to_token`.
+        elif (
+            direction == Direction.BACKWARDS
+            and to_token is not None
+            and from_token.is_before_or_eq(to_token)
+        ):
+            # Token selection matches what we do below if there are no rows
+            return [], to_token if to_token else from_token, False
 
         args: List[Any] = [room_id]
 
@@ -1699,6 +2100,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             args.extend(filter_args)
 
         # We fetch more events as we'll filter the result set
+        requested_limit = int(limit) * 2
         args.append(int(limit) * 2)
 
         select_keywords = "SELECT"
@@ -1761,13 +2163,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             "bounds": bounds,
             "order": order,
         }
-
         txn.execute(sql, args)
 
+        # Get all the rows and check if we hit the limit.
+        fetched_rows = txn.fetchall()
+        limited = len(fetched_rows) >= requested_limit
+
         # Filter the result set.
         rows = [
             _EventDictReturn(event_id, topological_ordering, stream_ordering)
-            for event_id, instance_name, topological_ordering, stream_ordering in txn
+            for event_id, instance_name, topological_ordering, stream_ordering in fetched_rows
             if _filter_results(
                 lower_token=(
                     to_token if direction == Direction.BACKWARDS else from_token
@@ -1779,7 +2184,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 topological_ordering=topological_ordering,
                 stream_ordering=stream_ordering,
             )
-        ][:limit]
+        ]
+
+        if len(rows) > limit:
+            limited = True
+
+        rows = rows[:limit]
 
         if rows:
             assert rows[-1].topological_ordering is not None
@@ -1790,39 +2200,53 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             # TODO (erikj): We should work out what to do here instead.
             next_token = to_token if to_token else from_token
 
-        return rows, next_token
+        return rows, next_token, limited
 
     @trace
-    async def paginate_room_events(
+    @tag_args
+    async def paginate_room_events_by_topological_ordering(
         self,
+        *,
         room_id: str,
         from_key: RoomStreamToken,
         to_key: Optional[RoomStreamToken] = None,
         direction: Direction = Direction.BACKWARDS,
-        limit: int = -1,
+        limit: int = 0,
         event_filter: Optional[Filter] = None,
-    ) -> Tuple[List[EventBase], RoomStreamToken]:
-        """Returns list of events before or after a given token.
-
-        When Direction.FORWARDS: from_key < x <= to_key
-        When Direction.BACKWARDS: from_key >= x > to_key
+    ) -> Tuple[List[EventBase], RoomStreamToken, bool]:
+        """
+        Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in
+        the room from the `from_key` in the given `direction` to the `to_key` or
+        `limit`.
 
         Args:
             room_id
-            from_key: The token used to stream from
-            to_key: A token which if given limits the results to only those before
+            from_key: The token to stream from (starting point and heading in the given
+                direction)
+            to_key: The token representing the end stream position (end point)
             direction: Indicates whether we are paginating forwards or backwards
                 from `from_key`.
-            limit: The maximum number of events to return.
+            limit: Maximum number of events to return
             event_filter: If provided filters the events to those that match the filter.
 
         Returns:
-            The results as a list of events and a token that points to the end
-            of the result set. If no events are returned then the end of the
+            The results as a list of events, a token that points to the end of
+            the result set, and a boolean to indicate if there were more events
+            but we hit the limit. If no events are returned then the end of the
             stream has been reached (i.e. there are no events between `from_key`
             and `to_key`).
+
+            When Direction.FORWARDS: from_key < x <= to_key, (ascending order)
+            When Direction.BACKWARDS: from_key >= x > to_key, (descending order)
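+
+        Example (illustrative sketch; assumes `from_token` is a `StreamToken`)::
+
+            events, next_token, limited = (
+                await store.paginate_room_events_by_topological_ordering(
+                    room_id=room_id,
+                    from_key=from_token.room_key,
+                    direction=Direction.BACKWARDS,
+                    limit=10,
+                )
+            )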
         """
 
+        # FIXME: When going forwards, we should enforce that the `to_key` is not `None`
+        # because we always need an upper bound when querying the events stream (as
+        # otherwise we'll potentially pick up events that are not fully persisted).
+
+        # We have these checks outside of the transaction function (txn) to save getting
+        # a DB connection and switching threads if we don't need to.
+        #
         # We can bail early if we're looking forwards, and our `to_key` is already
         # before our `from_key`.
         if (
@@ -1832,7 +2256,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         ):
             # Token selection matches what we do in `_paginate_room_events_txn` if there
             # are no rows
-            return [], to_key if to_key else from_key
+            return [], to_key if to_key else from_key, False
         # Or vice-versa, if we're looking backwards and our `from_key` is already before
         # our `to_key`.
         elif (
@@ -1842,11 +2266,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         ):
             # Token selection matches what we do in `_paginate_room_events_txn` if there
             # are no rows
-            return [], to_key if to_key else from_key
+            return [], to_key if to_key else from_key, False
 
-        rows, token = await self.db_pool.runInteraction(
-            "paginate_room_events",
-            self._paginate_room_events_txn,
+        rows, token, limited = await self.db_pool.runInteraction(
+            "paginate_room_events_by_topological_ordering",
+            self._paginate_room_events_by_topological_ordering_txn,
             room_id,
             from_key,
             to_key,
@@ -1859,7 +2283,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             [r.event_id for r in rows], get_prev_content=True
         )
 
-        return events, token
+        return events, token, limited
 
     @cached()
     async def get_id_for_instance(self, instance_name: str) -> int:
@@ -1957,3 +2381,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             return RoomStreamToken(stream=last_position.stream - 1)
 
         return None
+
+    @trace
+    def get_rooms_that_might_have_updates(
+        self, room_ids: StrCollection, from_token: RoomStreamToken
+    ) -> StrCollection:
+        """Filters given room IDs down to those that might have updates, i.e.
+        removes rooms that definitely do not have updates.
+        """
+        return self._events_stream_cache.get_entities_changed(
+            room_ids, from_token.stream
+        )
diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py
index b5af294384..44f395f315 100644
--- a/synapse/storage/databases/main/tags.py
+++ b/synapse/storage/databases/main/tags.py
@@ -158,9 +158,56 @@ class TagsWorkerStore(AccountDataWorkerStore):
 
         return results
 
+    async def has_tags_changed_for_room(
+        self,
+        # Since there are multiple arguments with the same type, force keyword arguments
+        # so people don't accidentally swap the order
+        *,
+        user_id: str,
+        room_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> bool:
+        """Check if the users tags for a room have been updated in the token range
+
+        (> `from_stream_id` and <= `to_stream_id`)
+
+        Args:
+            user_id: The user to get tags for
+            room_id: The room to get tags for
+            from_stream_id: The point in the stream to fetch from
+            to_stream_id: The point in the stream to fetch to
+
+        Returns:
+            True if the user's tags for the room changed in the given range,
+            False otherwise.
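+
+        Example (illustrative sketch; assumes `from_token`/`to_token` are
+        `StreamToken`s)::
+
+            changed = await store.has_tags_changed_for_room(
+                user_id=user_id,
+                room_id=room_id,
+                from_stream_id=from_token.account_data_key,
+                to_stream_id=to_token.account_data_key,
+            )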
+        """
+
+        # Shortcut if no room has changed for the user
+        changed = self._account_data_stream_cache.has_entity_changed(
+            user_id, int(from_stream_id)
+        )
+        if not changed:
+            return False
+
+        last_change_position_for_room = await self.db_pool.simple_select_one_onecol(
+            table="room_tags_revisions",
+            keyvalues={"user_id": user_id, "room_id": room_id},
+            retcol="stream_id",
+            allow_none=True,
+        )
+
+        if last_change_position_for_room is None:
+            return False
+
+        return (
+            last_change_position_for_room > from_stream_id
+            and last_change_position_for_room <= to_stream_id
+        )
+
+    @cached(num_args=2, tree=True)
     async def get_tags_for_room(
         self, user_id: str, room_id: str
-    ) -> Dict[str, JsonDict]:
+    ) -> Mapping[str, JsonMapping]:
         """Get all the tags for the given room
 
         Args:
@@ -182,7 +229,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         return {tag: db_to_json(content) for tag, content in rows}
 
     async def add_tag_to_room(
-        self, user_id: str, room_id: str, tag: str, content: JsonDict
+        self, user_id: str, room_id: str, tag: str, content: JsonMapping
     ) -> int:
         """Add a tag to a room for a user.
 
@@ -213,6 +260,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
+        self.get_tags_for_room.invalidate((user_id, room_id))
 
         return self._account_data_id_gen.get_current_token()
 
@@ -237,6 +285,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
+        self.get_tags_for_room.invalidate((user_id, room_id))
 
         return self._account_data_id_gen.get_current_token()
 
@@ -290,9 +339,19 @@ class TagsWorkerStore(AccountDataWorkerStore):
         rows: Iterable[Any],
     ) -> None:
         if stream_name == AccountDataStream.NAME:
-            for row in rows:
+            # Cast is safe because the `AccountDataStream` should only be giving us
+            # `AccountDataStreamRow`
+            account_data_stream_rows: List[AccountDataStream.AccountDataStreamRow] = (
+                cast(List[AccountDataStream.AccountDataStreamRow], rows)
+            )
+
+            for row in account_data_stream_rows:
                 if row.data_type == AccountDataTypes.TAG:
                     self.get_tags_for_user.invalidate((row.user_id,))
+                    if row.room_id:
+                        self.get_tags_for_room.invalidate((row.user_id, row.room_id))
+                    else:
+                        self.get_tags_for_room.invalidate((row.user_id,))
                     self._account_data_stream_cache.entity_has_changed(
                         row.user_id, token
                     )
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 6e18f714d7..51cffb0986 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -224,9 +224,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 SELECT room_id, events FROM %s
                 ORDER BY events DESC
                 LIMIT 250
-            """ % (
-                TEMP_TABLE + "_rooms",
-            )
+            """ % (TEMP_TABLE + "_rooms",)
             txn.execute(sql)
             rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall())
 
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index d4ac74c1ee..aea71b8fcc 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -767,7 +767,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         remaining_state_groups = {
             state_group
-            for state_group, in rows
+            for (state_group,) in rows
             if state_group not in state_groups_to_delete
         }
 
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index ad222e7e2d..9d82c59384 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -28,6 +28,11 @@ if TYPE_CHECKING:
     from synapse.storage.database import LoggingDatabaseConnection
 
 
+# A string that will be replaced with the appropriate auto increment directive
+# for the database engine; it expands to an auto-incrementing integer primary key.
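+#
+# Illustrative usage in a schema delta (hypothetical table name), based on how the
+# Postgres and SQLite engines substitute the placeholder when executing schema
+# scripts (see `postgres.py`/`sqlite.py`):
+#
+#     CREATE TABLE foo(
+#         id $%AUTO_INCREMENT_PRIMARY_KEY%$,
+#         colour TEXT
+#     );
+#
+# On PostgreSQL this expands to `BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY`,
+# and on SQLite to `INTEGER PRIMARY KEY AUTOINCREMENT`.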
+AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$"
+
+
 class IsolationLevel(IntEnum):
     READ_COMMITTED: int = 1
     REPEATABLE_READ: int = 2
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 90641d5a18..8c8c6d0414 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast
 import psycopg2.extensions
 
 from synapse.storage.engines._base import (
+    AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
     BaseDatabaseEngine,
     IncorrectDatabaseSetup,
     IsolationLevel,
@@ -256,4 +257,10 @@ class PostgresEngine(
         executing the script in its own transaction. The script transaction is
         left open and it is the responsibility of the caller to commit it.
         """
+        # Replace auto increment placeholder with the appropriate directive
+        script = script.replace(
+            AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
+            "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY",
+        )
+
         cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}")
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index b11094c5c1..9d1795ebe5 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -25,6 +25,7 @@ import threading
 from typing import TYPE_CHECKING, Any, List, Mapping, Optional
 
 from synapse.storage.engines import BaseDatabaseEngine
+from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER
 from synapse.storage.types import Cursor
 
 if TYPE_CHECKING:
@@ -168,6 +169,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
         > first. No other implicit transaction control is performed; any transaction
         > control must be added to sql_script.
         """
+        # Replace auto increment placeholder with the appropriate directive
+        script = script.replace(
+            AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT"
+        )
+
         # The implementation of `executescript` can be found at
         # https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035.
         cursor.executescript(f"BEGIN TRANSACTION; {script}")
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index aaffe5ecc9..bf087702ea 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -607,7 +607,7 @@ def _apply_module_schema_files(
         "SELECT file FROM applied_module_schemas WHERE module_name = ?",
         (modname,),
     )
-    applied_deltas = {d for d, in cur}
+    applied_deltas = {d for (d,) in cur}
     for name, stream in names_and_streams:
         if name in applied_deltas:
             continue
@@ -710,7 +710,7 @@ def _get_or_create_schema_state(
         "SELECT file FROM applied_schema_deltas WHERE version >= ?",
         (current_version,),
     )
-    applied_deltas = tuple(d for d, in txn)
+    applied_deltas = tuple(d for (d,) in txn)
 
     return _SchemaState(
         current_version=current_version,
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 80c9630867..9dc6c395e8 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -39,6 +39,34 @@ class RoomsForUser:
     room_version_id: str
 
 
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class RoomsForUserSlidingSync:
+    room_id: str
+    sender: Optional[str]
+    membership: str
+    event_id: Optional[str]
+    event_pos: PersistedEventPosition
+    room_version_id: str
+
+    has_known_state: bool
+    room_type: Optional[str]
+    is_encrypted: bool
+
+
+@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
+class RoomsForUserStateReset:
+    """A version of `RoomsForUser` that supports optional sender and event ID
+    fields, to handle state resets. State resets can affect room membership
+    without a corresponding event so that information isn't always available."""
+
+    room_id: str
+    sender: Optional[str]
+    membership: str
+    event_id: Optional[str]
+    event_pos: PersistedEventPosition
+    room_version_id: str
+
+
 @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
 class GetRoomsForUserWithStreamOrdering:
     room_id: str
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 0dc5d24249..f171f4568a 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -2,7 +2,7 @@
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
 # Copyright 2021 The Matrix.org Foundation C.I.C.
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -19,7 +19,7 @@
 #
 #
 
-SCHEMA_VERSION = 85  # remember to update the list below when updating
+SCHEMA_VERSION = 88  # remember to update the list below when updating
 """Represents the expectations made by the codebase about the database schema
 
 This should be incremented whenever the codebase changes its requirements on the
@@ -139,6 +139,20 @@ Changes in SCHEMA_VERSION = 84
 
 Changes in SCHEMA_VERSION = 85
     - Add a column `suspended` to the `users` table
+
+Changes in SCHEMA_VERSION = 86
+    - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache`
+
+Changes in SCHEMA_VERSION = 87
+    - Add tables to store Sliding Sync data for quick filtering/sorting
+      (`sliding_sync_joined_rooms`, `sliding_sync_membership_snapshots`)
+    - Add tables for storing the per-connection state for sliding sync requests:
+      `sliding_sync_connections`, `sliding_sync_connection_positions`, `sliding_sync_connection_required_state`,
+      `sliding_sync_connection_room_configs`, `sliding_sync_connection_streams`
+
+Changes in SCHEMA_VERSION = 88
+    - MSC4140: Add `delayed_events` table that keeps track of events that are to
+      be posted in response to a resettable timeout or an on-demand action.
 """
 
 
diff --git a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
index 2461f87d77..b7535dae14 100644
--- a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
+++ b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
@@ -41,8 +41,6 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
                 (user_id, filter_id);
             DROP TABLE user_filters;
             ALTER TABLE user_filters_migration RENAME TO user_filters;
-        """ % (
-        select_clause,
-    )
+        """ % (select_clause,)
 
     execute_statements_from_stream(cur, StringIO(sql))
diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
index 5d3578eaf4..a847ef4147 100644
--- a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
+++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
@@ -23,6 +23,7 @@
 This migration handles the process of changing the type of `room_depth.min_depth` to
 a BIGINT.
 """
+
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
 
diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
index b4d4b6536b..9ac3d1d31f 100644
--- a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
+++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
@@ -25,6 +25,7 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu
 
 Triggers cannot be expressed in .sql files, so we have to use a separate file.
 """
+
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 
diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
index 93543fca7c..be80a6747d 100644
--- a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
+++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
@@ -26,6 +26,7 @@ for its completion can be removed.
 
 Note the background job must still remain defined in the database class.
 """
+
 from synapse.config.homeserver import HomeServerConfig
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.engines import BaseDatabaseEngine
diff --git a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
index 6609ef0dac..a847a93494 100644
--- a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
+++ b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
@@ -24,6 +24,7 @@
 This migration adds triggers to the room membership tables to enforce consistency.
 Triggers cannot be expressed in .sql files, so we have to use a separate file.
 """
+
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 
diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
index ad9c394162..1c823a3aa1 100644
--- a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
+++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
@@ -23,6 +23,7 @@
 """
 This migration adds foreign key constraint to `event_forward_extremities` table.
 """
+
 from synapse.storage.background_updates import (
     ForeignKeyConstraint,
     run_validate_constraint_and_delete_rows_schema_delta,
diff --git a/synapse/storage/schema/main/delta/86/01_authenticate_media.sql b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql
new file mode 100644
index 0000000000..c1ac01ae95
--- /dev/null
+++ b/synapse/storage/schema/main/delta/86/01_authenticate_media.sql
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+ALTER TABLE remote_media_cache ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
+ALTER TABLE local_media_repository ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
diff --git a/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql
new file mode 100644
index 0000000000..e6db91e5b5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+    (8602, 'receipts_room_id_event_id_index', '{}');
diff --git a/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql
new file mode 100644
index 0000000000..2f71e541f8
--- /dev/null
+++ b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql
@@ -0,0 +1,169 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- This table is a list/queue used to keep track of which rooms need to be inserted into
+-- `sliding_sync_joined_rooms`. We do this to avoid reading from `current_state_events`
+-- during the background update to populate `sliding_sync_joined_rooms`; that works, but
+-- it takes a lot of work for the database to grab `DISTINCT` room_ids given how many
+-- state events there are for each room.
+--
+-- This table is prefilled with every room in the `rooms` table (see the
+-- `sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update` background
+-- update). This table is also updated whenever we come across stale data so that we can
+-- catch-up with all of the new data if Synapse was downgraded (see
+-- `_resolve_stale_data_in_sliding_sync_tables`).
+--
+-- FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+--  foreground update for
+-- `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+--  https://github.com/element-hq/synapse/issues/17623)
+CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms_to_recalculate(
+    room_id TEXT NOT NULL REFERENCES rooms(room_id),
+    PRIMARY KEY (room_id)
+);
+
+-- A table for storing room meta data (current state relevant to sliding sync) that the
+-- local server is still participating in (someone local is joined to the room).
+--
+-- We store the joined rooms in separate table from `sliding_sync_membership_snapshots`
+-- because we need up-to-date information for joined rooms and it can be shared across
+-- everyone who is joined.
+--
+-- This table is kept in sync with `current_state_events` which means if the server is
+-- no longer participating in a room, the row will be deleted.
+CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms(
+    room_id TEXT NOT NULL REFERENCES rooms(room_id),
+    -- The `stream_ordering` of the most-recent/latest event in the room
+    event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering),
+    -- The `stream_ordering` of the last event according to the `bump_event_types`
+    bump_stamp BIGINT,
+    -- `m.room.create` -> `content.type` (current state)
+    --
+    -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API
+    room_type TEXT,
+    -- `m.room.name` -> `content.name` (current state)
+    --
+    -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API
+    room_name TEXT,
+    -- `m.room.encryption` -> `content.algorithm` (current state)
+    --
+    -- Useful for the `is_encrypted` filter in the Sliding Sync API
+    is_encrypted BOOLEAN DEFAULT FALSE NOT NULL,
+    -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the
+    -- time of the membership).
+    --
+    -- Useful for the `include_old_rooms` functionality in the Sliding Sync API
+    tombstone_successor_room_id TEXT,
+    PRIMARY KEY (room_id)
+);
+
+-- So we can purge rooms easily.
+--
+-- The primary key is already `room_id`
+
+-- So we can sort by `stream_ordering`
+CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_joined_rooms_event_stream_ordering ON sliding_sync_joined_rooms(event_stream_ordering);
+
+-- A table for storing a snapshot of room meta data (historical current state relevant
+-- for sliding sync) at the time of a local user's membership. Only has rows for the
+-- latest membership event for a given local user in a room which matches
+-- `local_current_membership`.
+--
+-- We store all memberships including joins. This makes it easy to reference this table
+-- to find all memberships for a given user and shares the same semantics as
+-- `local_current_membership`. And we get to avoid some table maintenance; if we only
+-- stored non-joins, we would have to delete the row for the user when the user joins
+-- the room. Stripped state doesn't include the `m.room.tombstone` event, so we just
+-- assume that the room doesn't have a tombstone.
+--
+-- For remote invite/knocks where the server is not participating in the room, we will
+-- use stripped state events to populate this table. We assume that if any stripped
+-- state is given, it will include all possible stripped state event types. For
+-- example, if stripped state is given but `m.room.encryption` isn't included, we will
+-- assume that the room is not encrypted.
+--
+-- We don't include `bump_stamp` here because we can just use the `stream_ordering` from
+-- the membership event itself as the `bump_stamp`.
+CREATE TABLE IF NOT EXISTS sliding_sync_membership_snapshots(
+    room_id TEXT NOT NULL REFERENCES rooms(room_id),
+    user_id TEXT NOT NULL,
+    -- Useful to be able to tell leaves from kicks (where the `user_id` is different from the `sender`)
+    sender TEXT NOT NULL,
+    membership_event_id TEXT NOT NULL REFERENCES events(event_id),
+    membership TEXT NOT NULL,
+    -- This is an integer just to match `room_memberships` and also means we don't need
+    -- to do any casting.
+    forgotten INTEGER DEFAULT 0 NOT NULL,
+    -- `stream_ordering` of the `membership_event_id`
+    event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering),
+    -- `instance_name` of the worker that persisted the `membership_event_id`.
+    -- Useful for crafting `PersistedEventPosition(...)`
+    event_instance_name TEXT NOT NULL,
+    -- For remote invites/knocks that don't include any stripped state, we want to be
+    -- able to distinguish between a room with `None` as a valid value for some state and
+    -- a room where the state is completely unknown. Basically, this should be True unless
+    -- no stripped state was provided for a remote invite/knock (False).
+    has_known_state BOOLEAN DEFAULT FALSE NOT NULL,
+    -- `m.room.create` -> `content.type` (according to the current state at the time of
+    -- the membership).
+    --
+    -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API
+    room_type TEXT,
+    -- `m.room.name` -> `content.name` (according to the current state at the time of
+    -- the membership).
+    --
+    -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API
+    room_name TEXT,
+    -- `m.room.encryption` -> `content.algorithm` (according to the current state at the
+    -- time of the membership).
+    --
+    -- Useful for the `is_encrypted` filter in the Sliding Sync API
+    is_encrypted BOOLEAN DEFAULT FALSE NOT NULL,
+    -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the
+    -- time of the membership).
+    --
+    -- Useful for the `include_old_rooms` functionality in the Sliding Sync API
+    tombstone_successor_room_id TEXT,
+    PRIMARY KEY (room_id, user_id)
+);
+
+-- So we can purge rooms easily.
+--
+-- Since we're using a multi-column index as the primary key (room_id, user_id), the
+-- first index column (room_id) is always usable for searching so we don't need to
+-- create a separate index for it.
+--
+-- CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_room_id ON sliding_sync_membership_snapshots(room_id);
+
+-- So we can fetch all rooms for a given user
+CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id);
+-- So we can sort by `stream_ordering`
+CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_event_stream_ordering ON sliding_sync_membership_snapshots(event_stream_ordering);
+
+
+-- Add a series of background updates to populate the new `sliding_sync_joined_rooms` table:
+--
+--   1. Add a background update to prefill `sliding_sync_joined_rooms_to_recalculate`.
+--      We do a one-shot bulk insert from the `rooms` table to prefill.
+--   2. Add a background update to populate the new `sliding_sync_joined_rooms` table
+--      based on the rooms listed in the `sliding_sync_joined_rooms_to_recalculate`
+--      table.
+--
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update', '{}');
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+  (8701, 'sliding_sync_joined_rooms_bg_update', '{}', 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update');
+
+-- Add a background update to populate the new `sliding_sync_membership_snapshots` table
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'sliding_sync_membership_snapshots_bg_update', '{}');
diff --git a/synapse/storage/schema/main/delta/87/02_per_connection_state.sql b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql
new file mode 100644
index 0000000000..59bc14a2c9
--- /dev/null
+++ b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql
@@ -0,0 +1,81 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+
+-- Table to track active sliding sync connections.
+--
+-- A new connection will be created for every sliding sync request without a
+-- `since` token for a given `conn_id` for a device.
+--
+-- Once a new connection is created and used we delete all other connections for
+-- the `conn_id`.
+CREATE TABLE sliding_sync_connections(
+    connection_key $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    user_id TEXT NOT NULL,
+    -- Generally the device ID, but may be something else for e.g. puppeted accounts.
+    effective_device_id TEXT NOT NULL,
+    conn_id TEXT NOT NULL,
+    created_ts BIGINT NOT NULL
+);
+
+CREATE INDEX sliding_sync_connections_idx ON sliding_sync_connections(user_id, effective_device_id, conn_id);
+CREATE INDEX sliding_sync_connections_ts_idx ON sliding_sync_connections(created_ts);
+
+-- We track per-connection state by associating changes to the state with
+-- connection positions. This ensures that we correctly track state even if we
+-- see retries of requests.
+--
+-- If the client starts a "new" connection (by not specifying a since token),
+-- we'll clear out the other connections (to ensure that we don't end up with
+-- lots of connection keys).
+CREATE TABLE sliding_sync_connection_positions(
+    connection_position $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
+    created_ts BIGINT NOT NULL
+);
+
+CREATE INDEX sliding_sync_connection_positions_key ON sliding_sync_connection_positions(connection_key);
+CREATE INDEX sliding_sync_connection_positions_ts_idx ON sliding_sync_connection_positions(created_ts);
+
+
+-- To save space we deduplicate the `required_state` json by assigning IDs to
+-- different values.
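+--
+-- An illustrative (hypothetical) example of a stored `required_state` value, using
+-- the "json list of event type / state key tuples" encoding noted on the column
+-- below: [["m.room.member", "@alice:example.com"], ["m.room.name", ""]]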
+CREATE TABLE sliding_sync_connection_required_state(
+    required_state_id $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
+    required_state TEXT NOT NULL  -- We store this as a json list of event type / state key tuples.
+);
+
+CREATE INDEX sliding_sync_connection_required_state_conn_pos ON sliding_sync_connection_required_state(connection_key);
+
+
+-- Stores the room configs we have seen for rooms in a connection.
+CREATE TABLE sliding_sync_connection_room_configs(
+    connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
+    room_id TEXT NOT NULL,
+    timeline_limit BIGINT NOT NULL,
+    required_state_id BIGINT NOT NULL REFERENCES sliding_sync_connection_required_state(required_state_id)
+);
+
+CREATE UNIQUE INDEX sliding_sync_connection_room_configs_idx ON sliding_sync_connection_room_configs(connection_position, room_id);
+
+-- Stores what data we have sent for given streams down given connections.
+CREATE TABLE sliding_sync_connection_streams(
+    connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
+    stream TEXT NOT NULL,  -- e.g. "events" or "receipts"
+    room_id TEXT NOT NULL,
+    room_status TEXT NOT NULL,  -- "live" or "previously", i.e. the `HaveSentRoomFlag` value
+    last_token TEXT  -- For "previously" the token for the stream we have sent up to.
+);
+
+CREATE UNIQUE INDEX sliding_sync_connection_streams_idx ON sliding_sync_connection_streams(connection_position, room_id, stream);
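A self-contained SQLite sketch of the cascade behaviour between the connection and position tables above (the `$%AUTO_INCREMENT_PRIMARY_KEY%$` placeholder is substituted per database engine when the schema is applied; `INTEGER PRIMARY KEY AUTOINCREMENT` below is purely for illustration):

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("PRAGMA foreign_keys = ON")  # SQLite needs this for ON DELETE CASCADE
    db.execute(
        """
        CREATE TABLE sliding_sync_connections(
            connection_key INTEGER PRIMARY KEY AUTOINCREMENT,
            user_id TEXT NOT NULL,
            effective_device_id TEXT NOT NULL,
            conn_id TEXT NOT NULL,
            created_ts BIGINT NOT NULL
        )
        """
    )
    db.execute(
        """
        CREATE TABLE sliding_sync_connection_positions(
            connection_position INTEGER PRIMARY KEY AUTOINCREMENT,
            connection_key BIGINT NOT NULL
                REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
            created_ts BIGINT NOT NULL
        )
        """
    )
    db.execute(
        "INSERT INTO sliding_sync_connections"
        " (user_id, effective_device_id, conn_id, created_ts)"
        " VALUES ('@alice:test', 'DEVICE1', 'conn_id_1', 0)"
    )
    db.execute(
        "INSERT INTO sliding_sync_connection_positions (connection_key, created_ts)"
        " VALUES (1, 0)"
    )
    # When a new connection replaces an old one and the old row is deleted, every
    # position hanging off it is removed as well.
    db.execute("DELETE FROM sliding_sync_connections WHERE connection_key = 1")
    remaining = db.execute(
        "SELECT count(*) FROM sliding_sync_connection_positions"
    ).fetchone()[0]
    assert remaining == 0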
diff --git a/synapse/storage/schema/main/delta/87/03_current_state_index.sql b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
new file mode 100644
index 0000000000..76b974271c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
@@ -0,0 +1,19 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+
+-- Add a background update to add a new index:
+-- `current_state_events(room_id, membership) WHERE type = 'm.room.member'`.
+-- This makes counting membership in rooms (for syncs) much faster.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8701, 'current_state_events_members_room_index', '{}');
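For reference, a sketch of the shape of query the partial index above is meant to accelerate (the exact SQL Synapse runs may differ):

    # The index only covers rows with type = 'm.room.member', so a query needs that
    # predicate (plus room_id, and optionally membership) to be able to use it.
    MEMBER_COUNT_SQL = """
        SELECT membership, count(*)
        FROM current_state_events
        WHERE room_id = ? AND type = 'm.room.member'
        GROUP BY membership
    """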
diff --git a/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql
new file mode 100644
index 0000000000..78ba5129af
--- /dev/null
+++ b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql
@@ -0,0 +1,43 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2024 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+CREATE TABLE delayed_events (
+    delay_id TEXT NOT NULL,
+    user_localpart TEXT NOT NULL,
+    device_id TEXT,
+    delay BIGINT NOT NULL,
+    send_ts BIGINT NOT NULL,
+    room_id TEXT NOT NULL,
+    event_type TEXT NOT NULL,
+    state_key TEXT,
+    origin_server_ts BIGINT,
+    content bytea NOT NULL,
+    is_processed BOOLEAN NOT NULL DEFAULT FALSE,
+    PRIMARY KEY (user_localpart, delay_id)
+);
+
+CREATE INDEX delayed_events_send_ts ON delayed_events (send_ts);
+CREATE INDEX delayed_events_is_processed ON delayed_events (is_processed);
+CREATE INDEX delayed_events_room_state_event_idx ON delayed_events (room_id, event_type, state_key) WHERE state_key IS NOT NULL;
+
+CREATE TABLE delayed_events_stream_pos (
+    Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,  -- Makes sure this table only has one row.
+    stream_id BIGINT NOT NULL,
+    CHECK (Lock='X')
+);
+
+-- Start processing events from the point this migration was run, rather
+-- than the beginning of time.
+INSERT INTO delayed_events_stream_pos (
+    stream_id
+) SELECT COALESCE(MAX(stream_ordering), 0) from events;
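Two illustrative queries against the tables above (query shape only; Synapse's actual SQL may differ): picking delayed events that are due but not yet sent, and advancing the single-row stream position.

    # Matches the `delayed_events_send_ts` / `delayed_events_is_processed` indexes.
    DUE_DELAYED_EVENTS_SQL = """
        SELECT user_localpart, delay_id, room_id, event_type, state_key, content
        FROM delayed_events
        WHERE NOT is_processed AND send_ts <= ?
        ORDER BY send_ts
    """

    # `delayed_events_stream_pos` can only ever hold one row: every row must have
    # Lock = 'X' (the CHECK), and a second such row would violate the UNIQUE
    # constraint, so advancing the position is always an UPDATE of that one row.
    UPDATE_DELAYED_EVENTS_STREAM_POS_SQL = (
        "UPDATE delayed_events_stream_pos SET stream_id = ?"
    )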
diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi
index 27a974e1bb..3f317c3288 100644
--- a/synapse/synapse_rust/push.pyi
+++ b/synapse/synapse_rust/push.pyi
@@ -48,6 +48,7 @@ class FilteredPushRules:
         msc3381_polls_enabled: bool,
         msc3664_enabled: bool,
         msc4028_push_encrypted_events: bool,
+        msc4210_enabled: bool,
     ): ...
     def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
 
@@ -65,6 +66,7 @@ class PushRuleEvaluator:
         related_event_match_enabled: bool,
         room_version_feature_flags: Tuple[str, ...],
         msc3931_enabled: bool,
+        msc4210_enabled: bool,
     ): ...
     def run(
         self,
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index b22a13ef01..26783c5622 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -20,6 +20,7 @@
 #
 #
 import abc
+import logging
 import re
 import string
 from enum import Enum
@@ -74,6 +75,9 @@ if TYPE_CHECKING:
     from synapse.storage.databases.main import DataStore, PurgeEventsStore
     from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore
 
+
+logger = logging.getLogger(__name__)
+
 # Define a state map type from type/state_key to T (usually an event ID or
 # event)
 T = TypeVar("T")
@@ -454,6 +458,8 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
     represented by a default `stream` attribute and a map of instance name to
     stream position of any writers that are ahead of the default stream
     position.
+
+    The values in `instance_map` must be greater than the `stream` attribute.
     """
 
     stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True)
@@ -468,6 +474,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
         kw_only=True,
     )
 
+    def __attrs_post_init__(self) -> None:
+        # Enforce that all instances have a value greater than the min stream
+        # position.
+        for i, v in self.instance_map.items():
+            if v <= self.stream:
+                raise ValueError(
+                    f"'instance_map' includes a stream position before the main 'stream' attribute. Instance: {i}"
+                )
+
     @classmethod
     @abc.abstractmethod
     async def parse(cls, store: "DataStore", string: str) -> "Self":
@@ -494,6 +509,9 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
             for instance in set(self.instance_map).union(other.instance_map)
         }
 
+        # Filter out any redundant entries.
+        instance_map = {i: s for i, s in instance_map.items() if s > max_stream}
+
         return attr.evolve(
             self, stream=max_stream, instance_map=immutabledict(instance_map)
         )
@@ -539,10 +557,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
     def bound_stream_token(self, max_stream: int) -> "Self":
         """Bound the stream positions to a maximum value"""
 
+        min_pos = min(self.stream, max_stream)
         return type(self)(
-            stream=min(self.stream, max_stream),
+            stream=min_pos,
             instance_map=immutabledict(
-                {k: min(s, max_stream) for k, s in self.instance_map.items()}
+                {
+                    k: min(s, max_stream)
+                    for k, s in self.instance_map.items()
+                    if min(s, max_stream) > min_pos
+                }
             ),
         )
 
@@ -637,6 +660,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
                 "Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'."
             )
 
+        super().__attrs_post_init__()
+
     @classmethod
     async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken":
         try:
@@ -651,6 +676,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
 
                 instance_map = {}
                 for part in parts[1:]:
+                    if not part:
+                        # Handle tokens of the form `m5~`, which were created by
+                        # a bug
+                        continue
+
                     key, value = part.split(".")
                     instance_id = int(key)
                     pos = int(value)
@@ -666,7 +696,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
         except CancelledError:
             raise
         except Exception:
-            pass
+            # We log an exception here as even though this *might* be a client
+            # handing us a bad token, it's more likely that Synapse returned a bad
+            # token (and we really want to catch those!).
+            logger.exception("Failed to parse stream token: %r", string)
         raise SynapseError(400, "Invalid room stream token %r" % (string,))
 
     @classmethod
@@ -713,6 +746,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
         return self.instance_map.get(instance_name, self.stream)
 
     async def to_string(self, store: "DataStore") -> str:
+        """See class level docstring for information about the format."""
+
         if self.topological is not None:
             return "t%d-%d" % (self.topological, self.stream)
         elif self.instance_map:
@@ -727,8 +762,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
                 instance_id = await store.get_id_for_instance(name)
                 entries.append(f"{instance_id}.{pos}")
 
-            encoded_map = "~".join(entries)
-            return f"m{self.stream}~{encoded_map}"
+            if entries:
+                encoded_map = "~".join(entries)
+                return f"m{self.stream}~{encoded_map}"
+            return f"s{self.stream}"
         else:
             return "s%d" % (self.stream,)
 
@@ -740,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
 
         return super().bound_stream_token(max_stream)
 
+    def __str__(self) -> str:
+        instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
+        return (
+            f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, "
+            f"instances: {{{instances}}})"
+        )
+
 
 @attr.s(frozen=True, slots=True, order=False)
 class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
@@ -756,6 +800,11 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
 
                 instance_map = {}
                 for part in parts[1:]:
+                    if not part:
+                        # Handle tokens of the form `m5~`, which were created by
+                        # a bug
+                        continue
+
                     key, value = part.split(".")
                     instance_id = int(key)
                     pos = int(value)
@@ -770,10 +819,15 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
         except CancelledError:
             raise
         except Exception:
-            pass
+            # We log an exception here as even though this *might* be a client
+            # handing us a bad token, it's more likely that Synapse returned a bad
+            # token (and we really want to catch those!).
+            logger.exception("Failed to parse stream token: %r", string)
         raise SynapseError(400, "Invalid stream token %r" % (string,))
 
     async def to_string(self, store: "DataStore") -> str:
+        """See class level docstring for information about the format."""
+
         if self.instance_map:
             entries = []
             for name, pos in self.instance_map.items():
@@ -786,8 +840,10 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
                 instance_id = await store.get_id_for_instance(name)
                 entries.append(f"{instance_id}.{pos}")
 
-            encoded_map = "~".join(entries)
-            return f"m{self.stream}~{encoded_map}"
+            if entries:
+                encoded_map = "~".join(entries)
+                return f"m{self.stream}~{encoded_map}"
+            return str(self.stream)
         else:
             return str(self.stream)
 
@@ -824,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
 
         return True
 
+    def __str__(self) -> str:
+        instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
+        return (
+            f"MultiWriterStreamToken(stream: {self.stream}, "
+            f"instances: {{{instances}}})"
+        )
+
 
 class StreamKeyType(Enum):
     """Known stream types.
@@ -1082,12 +1145,64 @@ class StreamToken:
 
         return True
 
+    def __str__(self) -> str:
+        return (
+            f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, "
+            f"typing: {self.typing_key}, receipt: {self.receipt_key}, "
+            f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, "
+            f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, "
+            f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})"
+        )
+
 
 StreamToken.START = StreamToken(
     RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0
 )
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SlidingSyncStreamToken:
+    """The same as a `StreamToken`, but includes an extra field at the start for
+    the sliding sync connection token (separated by a '/'). This is used to
+    store per-connection state.
+
+    This then looks something like:
+        5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379
+
+    Attributes:
+        stream_token: Token representing the position of all the standard
+            streams.
+        connection_position: Token used by sliding sync to track updates to any
+            per-connection state stored by Synapse.
+    """
+
+    stream_token: StreamToken
+    connection_position: int
+
+    @staticmethod
+    @cancellable
+    async def from_string(store: "DataStore", string: str) -> "SlidingSyncStreamToken":
+        """Creates a SlidingSyncStreamToken from its textual representation."""
+        try:
+            connection_position_str, stream_token_str = string.split("/", 1)
+            connection_position = int(connection_position_str)
+            stream_token = await StreamToken.from_string(store, stream_token_str)
+
+            return SlidingSyncStreamToken(
+                stream_token=stream_token,
+                connection_position=connection_position,
+            )
+        except CancelledError:
+            raise
+        except Exception:
+            raise SynapseError(400, "Invalid stream token")
+
+    async def to_string(self, store: "DataStore") -> str:
+        """Serializes the token to a string"""
+        stream_token_str = await self.stream_token.to_string(store)
+        return f"{self.connection_position}/{stream_token_str}"
+
+
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class PersistedPosition:
     """Position of a newly persisted row with instance that persisted it."""
@@ -1170,11 +1285,12 @@ class ReadReceipt:
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class DeviceListUpdates:
     """
-    An object containing a diff of information regarding other users' device lists, intended for
-    a recipient to carry out device list tracking.
+    An object containing a diff of information regarding other users' device lists,
+    intended for a recipient to carry out device list tracking.
 
     Attributes:
-        changed: A set of users whose device lists have changed recently.
+        changed: A set of users who have updated their device identity or
+            cross-signing keys, or with whom the recipient now shares an
+            encrypted room.
         left: A set of users who the recipient no longer needs to track the device lists of.
             Typically when those users no longer share any end-to-end encryption enabled rooms.
     """
@@ -1192,7 +1308,7 @@ class DeviceListUpdates:
 
 
 def get_verify_key_from_cross_signing_key(
-    key_info: Mapping[str, Any]
+    key_info: Mapping[str, Any],
 ) -> Tuple[str, VerifyKey]:
     """Get the key ID and signedjson verify key from a cross-signing key dict
 
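Pulling the token encodings above together: room stream tokens are serialized as `s<stream>`, `t<topological>-<stream>` or `m<stream>~<instance_id>.<pos>~...`, and sliding sync tokens prepend a connection position separated by `/`. A self-contained sketch of the parsing (it mirrors the logic only, omitting the async instance-name lookups, and is not the code Synapse uses):

    from typing import Dict, Optional, Tuple

    def parse_room_token(string: str) -> Tuple[Optional[int], int, Dict[int, int]]:
        """Return (topological, stream, instance_map) for 's', 't' and 'm' tokens."""
        if string[0] == "t":
            topological, stream = string[1:].split("-", 1)
            return int(topological), int(stream), {}
        if string[0] == "s":
            return None, int(string[1:]), {}
        if string[0] == "m":
            parts = string[1:].split("~")
            instance_map = {}
            for part in parts[1:]:
                if not part:
                    # Tolerate the buggy `m5~` form described above.
                    continue
                key, value = part.split(".")
                instance_map[int(key)] = int(value)
            return None, int(parts[0]), instance_map
        raise ValueError(f"Invalid room stream token {string!r}")

    assert parse_room_token("s5") == (None, 5, {})
    assert parse_room_token("t3-5") == (3, 5, {})
    assert parse_room_token("m5~1.7~2.9") == (None, 5, {1: 7, 2: 9})
    assert parse_room_token("m5~") == (None, 5, {})

    # Sliding sync tokens look like "<connection_position>/<stream token>":
    connection_position, stream_token = "5/s2633508_17_338".split("/", 1)
    assert int(connection_position) == 5 and stream_token == "s2633508_17_338"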
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
index 3bd3268e59..f2fbc1dddf 100644
--- a/synapse/types/handlers/__init__.py
+++ b/synapse/types/handlers/__init__.py
@@ -17,25 +17,23 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from enum import Enum
-from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple
 
-import attr
-from typing_extensions import TypedDict
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
+from typing import List, Optional, TypedDict
 
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import Extra
-else:
-    from pydantic import Extra
+from synapse.api.constants import EventTypes
 
-from synapse.events import EventBase
-from synapse.types import JsonDict, JsonMapping, StreamToken, UserID
-from synapse.types.rest.client import SlidingSyncBody
-
-if TYPE_CHECKING:
-    from synapse.handlers.relations import BundledAggregations
+# Sliding Sync: The event types that clients should consider as new activity and affect
+# the `bump_stamp`
+SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES = {
+    EventTypes.Create,
+    EventTypes.Message,
+    EventTypes.Encrypted,
+    EventTypes.Sticker,
+    EventTypes.CallInvite,
+    EventTypes.PollStart,
+    EventTypes.LiveLocationShareStart,
+}
 
 
 class ShutdownRoomParams(TypedDict):
@@ -93,175 +91,3 @@ class ShutdownRoomResponse(TypedDict):
     failed_to_kick_users: List[str]
     local_aliases: List[str]
     new_room_id: Optional[str]
-
-
-class SlidingSyncConfig(SlidingSyncBody):
-    """
-    Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
-    extra fields that we need in the handler
-    """
-
-    user: UserID
-    device_id: Optional[str]
-
-    # Pydantic config
-    class Config:
-        # By default, ignore fields that we don't recognise.
-        extra = Extra.ignore
-        # By default, don't allow fields to be reassigned after parsing.
-        allow_mutation = False
-        # Allow custom types like `UserID` to be used in the model
-        arbitrary_types_allowed = True
-
-
-class OperationType(Enum):
-    """
-    Represents the operation types in a Sliding Sync window.
-
-    Attributes:
-        SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about
-            entries in this range.
-        INSERT: Sets a single entry. If the position is not empty then clients MUST move
-            entries to the left or the right depending on where the closest empty space is.
-        DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
-            places.
-        INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
-            offline support, but they should be treated as empty when additional operations
-            which concern indexes in the range arrive from the server.
-    """
-
-    SYNC: Final = "SYNC"
-    INSERT: Final = "INSERT"
-    DELETE: Final = "DELETE"
-    INVALIDATE: Final = "INVALIDATE"
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class SlidingSyncResult:
-    """
-    The Sliding Sync result to be serialized to JSON for a response.
-
-    Attributes:
-        next_pos: The next position token in the sliding window to request (next_batch).
-        lists: Sliding window API. A map of list key to list results.
-        rooms: Room subscription API. A map of room ID to room subscription to room results.
-        extensions: Extensions API. A map of extension key to extension results.
-    """
-
-    @attr.s(slots=True, frozen=True, auto_attribs=True)
-    class RoomResult:
-        """
-        Attributes:
-            name: Room name or calculated room name.
-            avatar: Room avatar
-            heroes: List of stripped membership events (containing `user_id` and optionally
-                `avatar_url` and `displayname`) for the users used to calculate the room name.
-            is_dm: Flag to specify whether the room is a direct-message room (most likely
-                between two people).
-            initial: Flag which is set when this is the first time the server is sending this
-                data on this connection. Clients can use this flag to replace or update
-                their local state. When there is an update, servers MUST omit this flag
-                entirely and NOT send "initial":false as this is wasteful on bandwidth. The
-                absence of this flag means 'false'.
-            required_state: The current state of the room
-            timeline: Latest events in the room. The last event is the most recent.
-            bundled_aggregations: A mapping of event ID to the bundled aggregations for
-                the timeline events above. This allows clients to show accurate reaction
-                counts (or edits, threads), even if some of the reaction events were skipped
-                over in a gappy sync.
-            stripped_state: Stripped state events (for rooms where the usre is
-                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
-                absent on joined/left rooms
-            prev_batch: A token that can be passed as a start parameter to the
-                `/rooms/<room_id>/messages` API to retrieve earlier messages.
-            limited: True if their are more events than fit between the given position and now.
-                Sync again to get more.
-            num_live: The number of timeline events which have just occurred and are not historical.
-                The last N events are 'live' and should be treated as such. This is mostly
-                useful to determine whether a given @mention event should make a noise or not.
-                Clients cannot rely solely on the absence of `initial: true` to determine live
-                events because if a room not in the sliding window bumps into the window because
-                of an @mention it will have `initial: true` yet contain a single live event
-                (with potentially other old events in the timeline).
-            joined_count: The number of users with membership of join, including the client's
-                own user ID. (same as sync `v2 m.joined_member_count`)
-            invited_count: The number of users with membership of invite. (same as sync v2
-                `m.invited_member_count`)
-            notification_count: The total number of unread notifications for this room. (same
-                as sync v2)
-            highlight_count: The number of unread notifications for this room with the highlight
-                flag set. (same as sync v2)
-        """
-
-        name: Optional[str]
-        avatar: Optional[str]
-        heroes: Optional[List[EventBase]]
-        is_dm: bool
-        initial: bool
-        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
-        required_state: Optional[List[EventBase]]
-        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
-        timeline_events: Optional[List[EventBase]]
-        bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
-        # Optional because it's only relevant to invite/knock rooms
-        stripped_state: Optional[List[JsonDict]]
-        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
-        prev_batch: Optional[StreamToken]
-        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
-        limited: Optional[bool]
-        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
-        num_live: Optional[int]
-        joined_count: int
-        invited_count: int
-        notification_count: int
-        highlight_count: int
-
-    @attr.s(slots=True, frozen=True, auto_attribs=True)
-    class SlidingWindowList:
-        """
-        Attributes:
-            count: The total number of entries in the list. Always present if this list
-                is.
-            ops: The sliding list operations to perform.
-        """
-
-        @attr.s(slots=True, frozen=True, auto_attribs=True)
-        class Operation:
-            """
-            Attributes:
-                op: The operation type to perform.
-                range: Which index positions are affected by this operation. These are
-                    both inclusive.
-                room_ids: Which room IDs are affected by this operation. These IDs match
-                    up to the positions in the `range`, so the last room ID in this list
-                    matches the 9th index. The room data is held in a separate object.
-            """
-
-            op: OperationType
-            range: Tuple[int, int]
-            room_ids: List[str]
-
-        count: int
-        ops: List[Operation]
-
-    next_pos: StreamToken
-    lists: Dict[str, SlidingWindowList]
-    rooms: Dict[str, RoomResult]
-    extensions: JsonMapping
-
-    def __bool__(self) -> bool:
-        """Make the result appear empty if there are no updates. This is used
-        to tell if the notifier needs to wait for more events when polling for
-        events.
-        """
-        return bool(self.lists or self.rooms or self.extensions)
-
-    @staticmethod
-    def empty(next_pos: StreamToken) -> "SlidingSyncResult":
-        "Return a new empty result"
-        return SlidingSyncResult(
-            next_pos=next_pos,
-            lists={},
-            rooms={},
-            extensions={},
-        )
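To illustrate what the new constant is for: a room's `bump_stamp` is the position of its latest event whose type is in the bump set, so e.g. profile and membership changes do not push a room to the top of a list. A minimal sketch (not Synapse's implementation, with the set abbreviated to two well-known types):

    from typing import Iterable, Optional, Tuple

    BUMP_EVENT_TYPES = {"m.room.message", "m.room.encrypted"}

    def bump_stamp(events: Iterable[Tuple[str, int]]) -> Optional[int]:
        """Stream ordering of the most recent event that counts as activity.

        `events` is an iterable of (event type, stream_ordering) pairs.
        """
        orderings = [
            ordering
            for event_type, ordering in events
            if event_type in BUMP_EVENT_TYPES
        ]
        return max(orderings) if orderings else None

    # A membership change does not bump the room, but a message does.
    assert bump_stamp([("m.room.member", 10), ("m.room.message", 7)]) == 7
    assert bump_stamp([("m.room.member", 10)]) is None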
diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py
new file mode 100644
index 0000000000..aae60fddea
--- /dev/null
+++ b/synapse/types/handlers/sliding_sync.py
@@ -0,0 +1,874 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+import typing
+from collections import ChainMap
+from enum import Enum
+from typing import (
+    TYPE_CHECKING,
+    AbstractSet,
+    Any,
+    Callable,
+    Dict,
+    Final,
+    Generic,
+    List,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    cast,
+)
+
+import attr
+
+from synapse._pydantic_compat import Extra
+from synapse.api.constants import EventTypes
+from synapse.events import EventBase
+from synapse.types import (
+    DeviceListUpdates,
+    JsonDict,
+    JsonMapping,
+    MultiWriterStreamToken,
+    Requester,
+    RoomStreamToken,
+    SlidingSyncStreamToken,
+    StrCollection,
+    StreamToken,
+    UserID,
+)
+from synapse.types.rest.client import SlidingSyncBody
+
+if TYPE_CHECKING:
+    from synapse.handlers.relations import BundledAggregations
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncConfig(SlidingSyncBody):
+    """
+    Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
+    extra fields that we need in the handler
+    """
+
+    user: UserID
+    requester: Requester
+
+    # Pydantic config
+    class Config:
+        # By default, ignore fields that we don't recognise.
+        extra = Extra.ignore
+        # By default, don't allow fields to be reassigned after parsing.
+        allow_mutation = False
+        # Allow custom types like `UserID` to be used in the model
+        arbitrary_types_allowed = True
+
+
+class OperationType(Enum):
+    """
+    Represents the operation types in a Sliding Sync window.
+
+    Attributes:
+        SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
+            entries in this range.
+        INSERT: Sets a single entry. If the position is not empty then clients MUST move
+            entries to the left or the right depending on where the closest empty space is.
+        DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
+            places.
+        INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
+            offline support, but they should be treated as empty when additional operations
+            which concern indexes in the range arrive from the server.
+    """
+
+    SYNC: Final = "SYNC"
+    INSERT: Final = "INSERT"
+    DELETE: Final = "DELETE"
+    INVALIDATE: Final = "INVALIDATE"
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SlidingSyncResult:
+    """
+    The Sliding Sync result to be serialized to JSON for a response.
+
+    Attributes:
+        next_pos: The next position token in the sliding window to request (next_batch).
+        lists: Sliding window API. A map of list key to list results.
+        rooms: Room subscription API. A map of room ID to room results.
+        extensions: Extensions API. A map of extension key to extension results.
+    """
+
+    @attr.s(slots=True, frozen=True, auto_attribs=True)
+    class RoomResult:
+        """
+        Attributes:
+            name: Room name or calculated room name.
+            avatar: Room avatar
+            heroes: List of stripped membership events (containing `user_id` and optionally
+                `avatar_url` and `displayname`) for the users used to calculate the room name.
+            is_dm: Flag to specify whether the room is a direct-message room (most likely
+                between two people).
+            initial: Flag which is set when this is the first time the server is sending this
+                data on this connection. Clients can use this flag to replace or update
+                their local state. When there is an update, servers MUST omit this flag
+                entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+                absence of this flag means 'false'.
+            unstable_expanded_timeline: Flag which is set if we're returning more historic
+                events due to the timeline limit having increased. See "XXX: Odd behavior"
+                comment in `synapse.handlers.sliding_sync`.
+            required_state: The current state of the room
+            timeline: Latest events in the room. The last event is the most recent.
+            bundled_aggregations: A mapping of event ID to the bundled aggregations for
+                the timeline events above. This allows clients to show accurate reaction
+                counts (or edits, threads), even if some of the reaction events were skipped
+                over in a gappy sync.
+            stripped_state: Stripped state events (for rooms where the user is
+                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
+                absent on joined/left rooms
+            prev_batch: A token that can be passed as a start parameter to the
+                `/rooms/<room_id>/messages` API to retrieve earlier messages.
+            limited: True if there are more events than `timeline_limit` looking
+                backwards from the `response.pos` to the `request.pos`.
+            num_live: The number of timeline events which have just occurred and are not historical.
+                The last N events are 'live' and should be treated as such. This is mostly
+                useful to determine whether a given @mention event should make a noise or not.
+                Clients cannot rely solely on the absence of `initial: true` to determine live
+                events because if a room not in the sliding window bumps into the window because
+                of an @mention it will have `initial: true` yet contain a single live event
+                (with potentially other old events in the timeline).
+            bump_stamp: The `stream_ordering` of the last event according to the
+                `bump_event_types`. This helps clients sort more readily without them
+                needing to pull in a bunch of the timeline to determine the last activity.
+                `bump_event_types` is a thing because for example, we don't want display
+                name changes to mark the room as unread and bump it to the top. For
+                encrypted rooms, we just have to consider any activity as a bump because we
+                can't see the content and the client has to figure it out for themselves.
+                This may not be included if there hasn't been a change.
+            joined_count: The number of users with membership of join, including the client's
+                own user ID. (same as sync `v2 m.joined_member_count`)
+            invited_count: The number of users with membership of invite. (same as sync v2
+                `m.invited_member_count`)
+            notification_count: The total number of unread notifications for this room. (same
+                as sync v2)
+            highlight_count: The number of unread notifications for this room with the highlight
+                flag set. (same as sync v2)
+        """
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class StrippedHero:
+            user_id: str
+            display_name: Optional[str]
+            avatar_url: Optional[str]
+
+        name: Optional[str]
+        avatar: Optional[str]
+        heroes: Optional[List[StrippedHero]]
+        is_dm: bool
+        initial: bool
+        unstable_expanded_timeline: bool
+        # Should be empty for invite/knock rooms with `stripped_state`
+        required_state: List[EventBase]
+        # Should be empty for invite/knock rooms with `stripped_state`
+        timeline_events: List[EventBase]
+        bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
+        # Optional because it's only relevant to invite/knock rooms
+        stripped_state: List[JsonDict]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        prev_batch: Optional[StreamToken]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        limited: Optional[bool]
+        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
+        num_live: Optional[int]
+        bump_stamp: Optional[int]
+        joined_count: Optional[int]
+        invited_count: Optional[int]
+        notification_count: int
+        highlight_count: int
+
+        def __bool__(self) -> bool:
+            return (
+                # If this is the first time the client is seeing the room, we should not filter it out
+                # under any circumstance.
+                self.initial
+                # We need to let the client know if any of the info has changed
+                or self.name is not None
+                or self.avatar is not None
+                or bool(self.heroes)
+                or self.joined_count is not None
+                or self.invited_count is not None
+                # We need to let the client know if there are any new events
+                or bool(self.required_state)
+                or bool(self.timeline_events)
+                or bool(self.stripped_state)
+            )
+
+    @attr.s(slots=True, frozen=True, auto_attribs=True)
+    class SlidingWindowList:
+        """
+        Attributes:
+            count: The total number of entries in the list. Always present if this list
+                is.
+            ops: The sliding list operations to perform.
+        """
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class Operation:
+            """
+            Attributes:
+                op: The operation type to perform.
+                range: Which index positions are affected by this operation. These are
+                    both inclusive.
+                room_ids: Which room IDs are affected by this operation. These IDs match
+                    up to the positions in the `range`, so the last room ID in this list
+                    matches the 9th index. The room data is held in a separate object.
+            """
+
+            op: OperationType
+            range: Tuple[int, int]
+            room_ids: List[str]
+
+        count: int
+        ops: List[Operation]
+
+    @attr.s(slots=True, frozen=True, auto_attribs=True)
+    class Extensions:
+        """Responses for extensions
+
+        Attributes:
+            to_device: The to-device extension (MSC3885)
+            e2ee: The E2EE device extension (MSC3884)
+        """
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class ToDeviceExtension:
+            """The to-device extension (MSC3885)
+
+            Attributes:
+                next_batch: The to-device stream token the client should use
+                    to get more results
+                events: A list of to-device messages for the client
+            """
+
+            next_batch: str
+            events: Sequence[JsonMapping]
+
+            def __bool__(self) -> bool:
+                return bool(self.events)
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class E2eeExtension:
+            """The E2EE device extension (MSC3884)
+
+            Attributes:
+                device_list_updates: List of user_ids whose devices have changed or left (only
+                    present on incremental syncs).
+                device_one_time_keys_count: Map from key algorithm to the number of
+                    unclaimed one-time keys currently held on the server for this device. If
+                    an algorithm is unlisted, the count for that algorithm is assumed to be
+                    zero. If this entire parameter is missing, the count for all algorithms
+                    is assumed to be zero.
+                device_unused_fallback_key_types: List of unused fallback key algorithms
+                    for this device.
+            """
+
+            # Only present on incremental syncs
+            device_list_updates: Optional[DeviceListUpdates]
+            device_one_time_keys_count: Mapping[str, int]
+            device_unused_fallback_key_types: Sequence[str]
+
+            def __bool__(self) -> bool:
+                # Note that "signed_curve25519" is always returned in key count responses
+                # regardless of whether we uploaded any keys for it. This is necessary until
+                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                #
+                # Also related:
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                default_otk = self.device_one_time_keys_count.get("signed_curve25519")
+                more_than_default_otk = len(self.device_one_time_keys_count) > 1 or (
+                    default_otk is not None and default_otk > 0
+                )
+
+                return bool(
+                    more_than_default_otk
+                    or self.device_list_updates
+                    or self.device_unused_fallback_key_types
+                )
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class AccountDataExtension:
+            """The Account Data extension (MSC3959)
+
+            Attributes:
+                global_account_data_map: Mapping from `type` to `content` of global
+                    account data events.
+                account_data_by_room_map: Mapping from room_id to mapping of `type` to
+                    `content` of room account data events.
+            """
+
+            global_account_data_map: Mapping[str, JsonMapping]
+            account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]]
+
+            def __bool__(self) -> bool:
+                return bool(
+                    self.global_account_data_map or self.account_data_by_room_map
+                )
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class ReceiptsExtension:
+            """The Receipts extension (MSC3960)
+
+            Attributes:
+                room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral
+                    event (type, content)
+            """
+
+            room_id_to_receipt_map: Mapping[str, JsonMapping]
+
+            def __bool__(self) -> bool:
+                return bool(self.room_id_to_receipt_map)
+
+        @attr.s(slots=True, frozen=True, auto_attribs=True)
+        class TypingExtension:
+            """The Typing Notification extension (MSC3961)
+
+            Attributes:
+                room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral
+                    event (type, content)
+            """
+
+            room_id_to_typing_map: Mapping[str, JsonMapping]
+
+            def __bool__(self) -> bool:
+                return bool(self.room_id_to_typing_map)
+
+        to_device: Optional[ToDeviceExtension] = None
+        e2ee: Optional[E2eeExtension] = None
+        account_data: Optional[AccountDataExtension] = None
+        receipts: Optional[ReceiptsExtension] = None
+        typing: Optional[TypingExtension] = None
+
+        def __bool__(self) -> bool:
+            return bool(
+                self.to_device
+                or self.e2ee
+                or self.account_data
+                or self.receipts
+                or self.typing
+            )
+
+    next_pos: SlidingSyncStreamToken
+    lists: Mapping[str, SlidingWindowList]
+    rooms: Dict[str, RoomResult]
+    extensions: Extensions
+
+    def __bool__(self) -> bool:
+        """Make the result appear empty if there are no updates. This is used
+        to tell if the notifier needs to wait for more events when polling for
+        events.
+        """
+        # We don't include `self.lists` here, as a) `lists` is always non-empty even if
+        # there are no changes, and b) since we're sorting rooms by `stream_ordering` of
+        # the latest activity, anything that would cause the order to change would end
+        # up in `self.rooms` and cause us to send down the change.
+        return bool(self.rooms or self.extensions)
+
+    @staticmethod
+    def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
+        "Return a new empty result"
+        return SlidingSyncResult(
+            next_pos=next_pos,
+            lists={},
+            rooms={},
+            extensions=SlidingSyncResult.Extensions(),
+        )
+
+
+class StateValues:
+    """
+    Understood values of the (type, state_key) tuple in `required_state`.
+    """
+
+    # Include all state events of the given type
+    WILDCARD: Final = "*"
+    # Lazy-load room membership events (include room membership events for any event
+    # `sender` in the timeline). We only give special meaning to this value when it's a
+    # `state_key`.
+    LAZY: Final = "$LAZY"
+    # Substitute with the requester's user ID. Typically used by clients to get
+    # the user's membership.
+    ME: Final = "$ME"
+
+
+# This class is frozen; `combine_room_sync_config` returns a new, combined
+# instance rather than mutating in place.
+@attr.s(slots=True, auto_attribs=True, frozen=True)
+class RoomSyncConfig:
+    """
+    Holds the config for what data we should fetch for a room in the sync response.
+
+    Attributes:
+        timeline_limit: The maximum number of events to return in the timeline.
+
+        required_state_map: Map from state event type to state_keys requested for the
+            room. The values are close to `StateKey` but actually use a syntax where you
+            can provide `*` wildcard and `$LAZY` for lazy-loading room members.
+    """
+
+    timeline_limit: int
+    required_state_map: Mapping[str, AbstractSet[str]]
+
+    @classmethod
+    def from_room_config(
+        cls,
+        room_params: SlidingSyncConfig.CommonRoomParameters,
+    ) -> "RoomSyncConfig":
+        """
+        Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config.
+
+        Args:
+            room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription`
+        """
+        required_state_map: Dict[str, Set[str]] = {}
+        for (
+            state_type,
+            state_key,
+        ) in room_params.required_state:
+            # If we already have a wildcard for this specific `state_key`, we don't need
+            # to add it since the wildcard already covers it.
+            if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                continue
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD:
+                required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            # If we're getting a wildcard for the `state_type`, get rid of any other
+            # entries with the same `state_key`, since the wildcard will cover it already.
+            elif state_type == StateValues.WILDCARD:
+                # Get rid of any entries that match the `state_key`
+                #
+                # Make a copy so we don't run into an error: `dictionary changed size
+                # during iteration`, when we remove items
+                for (
+                    existing_state_type,
+                    existing_state_key_set,
+                ) in list(required_state_map.items()):
+                    # Make a copy so we don't run into an error: `Set changed size during
+                    # iteration`, when we filter out and remove items
+                    for existing_state_key in existing_state_key_set.copy():
+                        if existing_state_key == state_key:
+                            existing_state_key_set.remove(state_key)
+
+                    # If we've left the `set()` empty, remove it from the map
+                    if existing_state_key_set == set():
+                        required_state_map.pop(existing_state_type, None)
+
+            # If we're getting a wildcard `state_key`, get rid of any other state_keys
+            # for this `state_type` since the wildcard will cover it already.
+            if state_key == StateValues.WILDCARD:
+                required_state_map[state_type] = {state_key}
+            # Otherwise, just add it to the set
+            else:
+                if required_state_map.get(state_type) is None:
+                    required_state_map[state_type] = {state_key}
+                else:
+                    required_state_map[state_type].add(state_key)
+
+        return cls(
+            timeline_limit=room_params.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def combine_room_sync_config(
+        self, other_room_sync_config: "RoomSyncConfig"
+    ) -> "RoomSyncConfig":
+        """
+        Combine this `RoomSyncConfig` with another `RoomSyncConfig` and return the
+        superset union of the two.
+        """
+        timeline_limit = self.timeline_limit
+        required_state_map = {
+            event_type: set(state_keys)
+            for event_type, state_keys in self.required_state_map.items()
+        }
+
+        # Take the highest timeline limit
+        if timeline_limit < other_room_sync_config.timeline_limit:
+            timeline_limit = other_room_sync_config.timeline_limit
+
+        # Union the required state
+        for (
+            state_type,
+            state_key_set,
+        ) in other_room_sync_config.required_state_map.items():
+            # If we already have a wildcard for everything, we don't need to add
+            # anything else
+            if StateValues.WILDCARD in required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                break
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if (
+                state_type == StateValues.WILDCARD
+                and StateValues.WILDCARD in state_key_set
+            ):
+                required_state_map = {state_type: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            for state_key in state_key_set:
+                # If we already have a wildcard for this specific `state_key`, we don't need
+                # to add it since the wildcard already covers it.
+                if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                    continue
+
+                # If we're getting a wildcard for the `state_type`, get rid of any other
+                # entries with the same `state_key`, since the wildcard will cover it already.
+                if state_type == StateValues.WILDCARD:
+                    # Get rid of any entries that match the `state_key`
+                    #
+                    # Make a copy so we don't run into an error: `dictionary changed size
+                    # during iteration`, when we remove items
+                    for existing_state_type, existing_state_key_set in list(
+                        required_state_map.items()
+                    ):
+                        # Make a copy so we don't run into an error: `Set changed size during
+                        # iteration`, when we filter out and remove items
+                        for existing_state_key in existing_state_key_set.copy():
+                            if existing_state_key == state_key:
+                                existing_state_key_set.remove(state_key)
+
+                        # If we've left the `set()` empty, remove it from the map
+                        if existing_state_key_set == set():
+                            required_state_map.pop(existing_state_type, None)
+
+                # If we're getting a wildcard `state_key`, get rid of any other state_keys
+                # for this `state_type` since the wildcard will cover it already.
+                if state_key == StateValues.WILDCARD:
+                    required_state_map[state_type] = {state_key}
+                    break
+                # Otherwise, just add it to the set
+                else:
+                    if required_state_map.get(state_type) is None:
+                        required_state_map[state_type] = {state_key}
+                    else:
+                        required_state_map[state_type].add(state_key)
+
+        return RoomSyncConfig(timeline_limit, required_state_map)
+
+    def must_await_full_state(
+        self,
+        is_mine_id: Callable[[str], bool],
+    ) -> bool:
+        """
+        Check whether we're only requesting `required_state` which is completely
+        satisfied even with partial state. If so, we don't need to `await_full_state`
+        before we can return it.
+
+        Also see `StateFilter.must_await_full_state(...)` for comparison
+
+        Partially-stated rooms should have all state events except for remote membership
+        events so if we require a remote membership event anywhere, then we need to
+        return `True` (requires full state).
+
+        Args:
+            is_mine_id: a callable which confirms if a given state_key matches a mxid
+               of a local user
+        """
+        wildcard_state_keys = self.required_state_map.get(StateValues.WILDCARD)
+        # Requesting *all* state in the room so we have to wait
+        if (
+            wildcard_state_keys is not None
+            and StateValues.WILDCARD in wildcard_state_keys
+        ):
+            return True
+
+        # If the wildcards don't refer to remote user IDs, then we don't need to wait
+        # for full state.
+        if wildcard_state_keys is not None:
+            for possible_user_id in wildcard_state_keys:
+                if not possible_user_id[0].startswith(UserID.SIGIL):
+                    # Not a user ID
+                    continue
+
+                localpart_hostname = possible_user_id.split(":", 1)
+                if len(localpart_hostname) < 2:
+                    # Not a user ID
+                    continue
+
+                if not is_mine_id(possible_user_id):
+                    return True
+
+        membership_state_keys = self.required_state_map.get(EventTypes.Member)
+        # We aren't requesting any membership events at all so the partial state will
+        # cover us.
+        if membership_state_keys is None:
+            return False
+
+        # If we're requesting entirely local users, the partial state will cover us.
+        for user_id in membership_state_keys:
+            if user_id == StateValues.ME:
+                continue
+            # We're lazy-loading membership so we can just return the state we have.
+            # Lazy-loading means we include membership for any event `sender` in the
+            # timeline but since we had to auth those timeline events, we will have the
+            # membership state for them (including from remote senders).
+            elif user_id == StateValues.LAZY:
+                continue
+            elif user_id == StateValues.WILDCARD:
+                return False
+            elif not is_mine_id(user_id):
+                return True
+
+        # Local users only so the partial state will cover us.
+        return False
+
+
+class HaveSentRoomFlag(Enum):
+    """Flag for whether we have sent the room down a sliding sync connection.
+
+    The valid state changes here are:
+        NEVER -> LIVE
+        LIVE -> PREVIOUSLY
+        PREVIOUSLY -> LIVE
+    """
+
+    # The room has never been sent down (or we have forgotten we have sent it
+    # down).
+    NEVER = "never"
+
+    # We have previously sent the room down, but there are updates that we
+    # haven't sent down.
+    PREVIOUSLY = "previously"
+
+    # We have sent the room down and the client has received all updates.
+    LIVE = "live"
+
+
+T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken, int)
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class HaveSentRoom(Generic[T]):
+    """Whether we have sent the room data down a sliding sync connection.
+
+    We are generic over the type of token used, e.g. `RoomStreamToken` or
+    `MultiWriterStreamToken`.
+
+    Attributes:
+        status: Flag for whether we have sent the room down
+        last_token: If the flag is `PREVIOUSLY` then this is non-null and
+            contains the last stream token of the last updates we sent down
+            the room, i.e. we still need to send everything since then to the
+            client.
+    """
+
+    status: HaveSentRoomFlag
+    last_token: Optional[T]
+
+    @staticmethod
+    def live() -> "HaveSentRoom[T]":
+        return HaveSentRoom(HaveSentRoomFlag.LIVE, None)
+
+    @staticmethod
+    def previously(last_token: T) -> "HaveSentRoom[T]":
+        """Constructor for `PREVIOUSLY` flag."""
+        return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token)
+
+    @staticmethod
+    def never() -> "HaveSentRoom[T]":
+        # We use a singleton to avoid repeatedly instantiating new `never`
+        # values.
+        return _HAVE_SENT_ROOM_NEVER
+
+
+_HAVE_SENT_ROOM_NEVER: HaveSentRoom[Any] = HaveSentRoom(HaveSentRoomFlag.NEVER, None)
+
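+# Illustrative usage (hypothetical token value): callers construct statuses via
+# the static helpers rather than the attrs constructor, e.g.
+#     HaveSentRoom.live()                    # status LIVE, last_token None
+#     HaveSentRoom.previously(stream_token)  # status PREVIOUSLY, last_token set
+#     HaveSentRoom.never()                   # shared NEVER singleton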
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class RoomStatusMap(Generic[T]):
+    """For a given stream, e.g. events, records what we have or have not sent
+    down for that stream in a given room."""
+
+    # `room_id` -> `HaveSentRoom`
+    _statuses: Mapping[str, HaveSentRoom[T]] = attr.Factory(dict)
+
+    def have_sent_room(self, room_id: str) -> HaveSentRoom[T]:
+        """Return whether we have previously sent the room down"""
+        return self._statuses.get(room_id, HaveSentRoom.never())
+
+    def get_mutable(self) -> "MutableRoomStatusMap[T]":
+        """Get a mutable copy of this state."""
+        return MutableRoomStatusMap(
+            statuses=self._statuses,
+        )
+
+    def copy(self) -> "RoomStatusMap[T]":
+        """Make a copy of this status map. Useful for converting from a mutable
+        to an immutable version."""
+
+        return RoomStatusMap(statuses=dict(self._statuses))
+
+    def __len__(self) -> int:
+        return len(self._statuses)
+
+
+class MutableRoomStatusMap(RoomStatusMap[T]):
+    """A mutable version of `RoomStatusMap`"""
+
+    # We use a ChainMap here so that we can easily track what has been updated
+    # and what hasn't. Note that when we persist the per connection state this
+    # will get flattened to a normal dict (via calling `.copy()`)
+    _statuses: typing.ChainMap[str, HaveSentRoom[T]]
+
+    def __init__(
+        self,
+        statuses: Mapping[str, HaveSentRoom[T]],
+    ) -> None:
+        # ChainMap requires a mutable mapping, but we're not actually going to
+        # mutate it.
+        statuses = cast(MutableMapping, statuses)
+
+        super().__init__(
+            statuses=ChainMap({}, statuses),
+        )
+
+    def get_updates(self) -> Mapping[str, HaveSentRoom[T]]:
+        """Return only the changes that were made"""
+        return self._statuses.maps[0]
+
+    def record_sent_rooms(self, room_ids: StrCollection) -> None:
+        """Record that we have sent these rooms in the response"""
+        for room_id in room_ids:
+            current_status = self._statuses.get(room_id, HaveSentRoom.never())
+            if current_status.status == HaveSentRoomFlag.LIVE:
+                continue
+
+            self._statuses[room_id] = HaveSentRoom.live()
+
+    def record_unsent_rooms(self, room_ids: StrCollection, from_token: T) -> None:
+        """Record that we have not sent these rooms in the response, but there
+        have been updates.
+        """
+        # Whether we add/update the entries for unsent rooms depends on the
+        # existing entry:
+        #   - LIVE: We have previously sent down everything up to
+        #     `last_room_token`, so we update the entry to be `PREVIOUSLY` with
+        #     `last_room_token`.
+        #   - PREVIOUSLY: We have previously sent down everything up to *a*
+        #     given token, so we don't need to update the entry.
+        #   - NEVER: We have never previously sent down the room, and we haven't
+        #     sent anything down this time either so we leave it as NEVER.
+
+        for room_id in room_ids:
+            current_status = self._statuses.get(room_id, HaveSentRoom.never())
+            if current_status.status != HaveSentRoomFlag.LIVE:
+                continue
+
+            self._statuses[room_id] = HaveSentRoom.previously(from_token)
+
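+# Illustrative sketch (hypothetical room ID): because `_statuses` is a
+# ChainMap({}, original), writes land in the first map only, so `get_updates()`
+# returns just the rooms touched during this request:
+#     mutable = RoomStatusMap().get_mutable()
+#     mutable.record_sent_rooms(["!a:example.org"])
+#     mutable.get_updates()  # {"!a:example.org": HaveSentRoom.live()}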
+
+@attr.s(auto_attribs=True, frozen=True)
+class PerConnectionState:
+    """The per-connection state. A snapshot of what we've sent down the
+    connection before.
+
+    Currently, we track whether we've sent down various aspects of a given room
+    before.
+
+    We use the `rooms` field to store the position in the events stream for each
+    room that we've previously sent down to the client. On the next request
+    that includes the room, we can then send only what's changed since that
+    recorded position.
+
+    The same goes for the `receipts` field, so we only need to send the new
+    receipts since the client's last sync request.
+
+    Attributes:
+        rooms: The status of each room for the events stream.
+        receipts: The status of each room for the receipts stream.
+        account_data: The status of each room for the account data stream.
+        room_configs: Map from room_id to the `RoomSyncConfig` of all
+            rooms that we have previously sent down.
+    """
+
+    rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap)
+    receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap)
+    account_data: RoomStatusMap[int] = attr.Factory(RoomStatusMap)
+
+    room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict)
+
+    def get_mutable(self) -> "MutablePerConnectionState":
+        """Get a mutable copy of this state."""
+        room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs)
+
+        return MutablePerConnectionState(
+            rooms=self.rooms.get_mutable(),
+            receipts=self.receipts.get_mutable(),
+            account_data=self.account_data.get_mutable(),
+            room_configs=ChainMap({}, room_configs),
+        )
+
+    def copy(self) -> "PerConnectionState":
+        return PerConnectionState(
+            rooms=self.rooms.copy(),
+            receipts=self.receipts.copy(),
+            account_data=self.account_data.copy(),
+            room_configs=dict(self.room_configs),
+        )
+
+    def __len__(self) -> int:
+        return len(self.rooms) + len(self.receipts) + len(self.room_configs)
+
+
+@attr.s(auto_attribs=True)
+class MutablePerConnectionState(PerConnectionState):
+    """A mutable version of `PerConnectionState`"""
+
+    rooms: MutableRoomStatusMap[RoomStreamToken]
+    receipts: MutableRoomStatusMap[MultiWriterStreamToken]
+    account_data: MutableRoomStatusMap[int]
+
+    room_configs: typing.ChainMap[str, RoomSyncConfig]
+
+    def has_updates(self) -> bool:
+        return (
+            bool(self.rooms.get_updates())
+            or bool(self.receipts.get_updates())
+            or bool(self.account_data.get_updates())
+            or bool(self.get_room_config_updates())
+        )
+
+    def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]:
+        """Get updates to the room sync config"""
+        return self.room_configs.maps[0]
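+
+
+# Illustrative flow (hypothetical variable names): a request handler takes a
+# mutable snapshot of the previous per-connection state, records what it sent,
+# and persists a flattened copy only if something actually changed:
+#     mutable_state = previous_state.get_mutable()
+#     mutable_state.rooms.record_sent_rooms(sent_room_ids)
+#     if mutable_state.has_updates():
+#         new_state = mutable_state.copy()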
diff --git a/synapse/types/rest/__init__.py b/synapse/types/rest/__init__.py
index 2b6f5ed35a..183831e79a 100644
--- a/synapse/types/rest/__init__.py
+++ b/synapse/types/rest/__init__.py
@@ -18,14 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import TYPE_CHECKING
-
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, Extra
-else:
-    from pydantic import BaseModel, Extra
+from synapse._pydantic_compat import BaseModel, Extra
 
 
 class RequestBodyModel(BaseModel):
diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py
index 55f6b44053..c739bd16b0 100644
--- a/synapse/types/rest/client/__init__.py
+++ b/synapse/types/rest/client/__init__.py
@@ -20,29 +20,15 @@
 #
 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
-
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import (
-        Extra,
-        StrictBool,
-        StrictInt,
-        StrictStr,
-        conint,
-        constr,
-        validator,
-    )
-else:
-    from pydantic import (
-        Extra,
-        StrictBool,
-        StrictInt,
-        StrictStr,
-        conint,
-        constr,
-        validator,
-    )
-
+from synapse._pydantic_compat import (
+    Extra,
+    StrictBool,
+    StrictInt,
+    StrictStr,
+    conint,
+    constr,
+    validator,
+)
 from synapse.types.rest import RequestBodyModel
 from synapse.util.threepids import validate_email
 
@@ -120,6 +106,9 @@ class SlidingSyncBody(RequestBodyModel):
     Sliding Sync API request body.
 
     Attributes:
+        conn_id: An optional string to identify this connection to the server.
+            Only one sliding sync connection is allowed per given conn_id (empty
+            or not).
         lists: Sliding window API. A map of list key to list information
             (:class:`SlidingSyncList`). Max lists: 100. The list keys should be
             arbitrary strings which the client is using to refer to the list. Keep this
@@ -200,9 +189,6 @@ class SlidingSyncBody(RequestBodyModel):
                     }
 
             timeline_limit: The maximum number of timeline events to return per response.
-            include_heroes: Return a stripped variant of membership events (containing
-                `user_id` and optionally `avatar_url` and `displayname`) for the users used
-                to calculate the room name.
             filters: Filters to apply to the list before sorting.
         """
 
@@ -268,26 +254,133 @@ class SlidingSyncBody(RequestBodyModel):
         if TYPE_CHECKING:
             ranges: Optional[List[Tuple[int, int]]] = None
         else:
-            ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None  # type: ignore[valid-type]
+            ranges: Optional[
+                List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]
+            ] = None  # type: ignore[valid-type]
         slow_get_all_rooms: Optional[StrictBool] = False
-        include_heroes: Optional[StrictBool] = False
         filters: Optional[Filters] = None
 
     class RoomSubscription(CommonRoomParameters):
         pass
 
-    class Extension(RequestBodyModel):
-        enabled: Optional[StrictBool] = False
-        lists: Optional[List[StrictStr]] = None
-        rooms: Optional[List[StrictStr]] = None
+    class Extensions(RequestBodyModel):
+        """The extensions section of the request.
+
+        Extensions MUST have an `enabled` flag which defaults to `false`. If a client
+        sends an unknown extension name, the server MUST ignore it (or else backwards
+        compatibility between clients and servers is broken when a newer client tries to
+        communicate with an older server).
+        """
+
+        class ToDeviceExtension(RequestBodyModel):
+            """The to-device extension (MSC3885)
+
+            Attributes:
+                enabled
+                limit: Maximum number of to-device messages to return
+                since: The `next_batch` from the previous sync response
+            """
+
+            enabled: Optional[StrictBool] = False
+            limit: StrictInt = 100
+            since: Optional[StrictStr] = None
+
+            @validator("since")
+            def since_token_check(
+                cls, value: Optional[StrictStr]
+            ) -> Optional[StrictStr]:
+                # `since` comes in as an opaque string token but we know that it's just
+                # an integer representing the position in the device inbox stream. We
+                # want to pre-validate it to make sure it works fine in downstream code.
+                if value is None:
+                    return value
+
+                try:
+                    int(value)
+                except ValueError:
+                    raise ValueError(
+                        "'extensions.to_device.since' is invalid (should look like an int)"
+                    )
+
+                return value
+
+        class E2eeExtension(RequestBodyModel):
+            """The E2EE device extension (MSC3884)
+
+            Attributes:
+                enabled
+            """
+
+            enabled: Optional[StrictBool] = False
+
+        class AccountDataExtension(RequestBodyModel):
+            """The Account Data extension (MSC3959)
+
+            Attributes:
+                enabled
+                lists: List of list keys (from the Sliding Window API) to apply this
+                    extension to.
+                rooms: List of room IDs (from the Room Subscription API) to apply this
+                    extension to.
+            """
+
+            enabled: Optional[StrictBool] = False
+            # Process all lists defined in the Sliding Window API. (This is the default.)
+            lists: Optional[List[StrictStr]] = ["*"]
+            # Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+            rooms: Optional[List[StrictStr]] = ["*"]
+
+        class ReceiptsExtension(RequestBodyModel):
+            """The Receipts extension (MSC3960)
+
+            Attributes:
+                enabled
+                lists: List of list keys (from the Sliding Window API) to apply this
+                    extension to.
+                rooms: List of room IDs (from the Room Subscription API) to apply this
+                    extension to.
+            """
+
+            enabled: Optional[StrictBool] = False
+            # Process all lists defined in the Sliding Window API. (This is the default.)
+            lists: Optional[List[StrictStr]] = ["*"]
+            # Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+            rooms: Optional[List[StrictStr]] = ["*"]
+
+        class TypingExtension(RequestBodyModel):
+            """The Typing Notification extension (MSC3961)
+
+            Attributes:
+                enabled
+                lists: List of list keys (from the Sliding Window API) to apply this
+                    extension to.
+                rooms: List of room IDs (from the Room Subscription API) to apply this
+                    extension to.
+            """
+
+            enabled: Optional[StrictBool] = False
+            # Process all lists defined in the Sliding Window API. (This is the default.)
+            lists: Optional[List[StrictStr]] = ["*"]
+            # Process all room subscriptions defined in the Room Subscription API. (This is the default.)
+            rooms: Optional[List[StrictStr]] = ["*"]
+
+        to_device: Optional[ToDeviceExtension] = None
+        e2ee: Optional[E2eeExtension] = None
+        account_data: Optional[AccountDataExtension] = None
+        receipts: Optional[ReceiptsExtension] = None
+        typing: Optional[TypingExtension] = None
+
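+    # Illustrative request body sketch (hypothetical values; other list
+    # parameters omitted):
+    #     {
+    #         "conn_id": "client-1",
+    #         "lists": {"main": {"ranges": [[0, 10]], "timeline_limit": 5}},
+    #         "extensions": {"to_device": {"enabled": true, "limit": 100}},
+    #     }
+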
+    conn_id: Optional[StrictStr]
 
     # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
     if TYPE_CHECKING:
         lists: Optional[Dict[str, SlidingSyncList]] = None
     else:
-        lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None  # type: ignore[valid-type]
+        lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = (
+            None  # type: ignore[valid-type]
+        )
     room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None
-    extensions: Optional[Dict[StrictStr, Extension]] = None
+    extensions: Optional[Extensions] = None
 
     @validator("lists")
     def lists_length_check(
diff --git a/synapse/types/state.py b/synapse/types/state.py
index c958a95701..67d1c3fe97 100644
--- a/synapse/types/state.py
+++ b/synapse/types/state.py
@@ -503,13 +503,19 @@ class StateFilter:
         #   - if so, which event types are excluded? ('excludes')
         #   - which entire event types to include ('wildcards')
         #   - which concrete state keys to include ('concrete state keys')
-        (self_all, self_excludes), (
-            self_wildcards,
-            self_concrete_keys,
+        (
+            (self_all, self_excludes),
+            (
+                self_wildcards,
+                self_concrete_keys,
+            ),
         ) = self._decompose_into_four_parts()
-        (other_all, other_excludes), (
-            other_wildcards,
-            other_concrete_keys,
+        (
+            (other_all, other_excludes),
+            (
+                other_wildcards,
+                other_concrete_keys,
+            ),
         ) = other._decompose_into_four_parts()
 
         # Start with an estimate of the difference based on self
@@ -610,6 +616,13 @@ class StateFilter:
 
         return False
 
+    def __bool__(self) -> bool:
+        """Returns true if this state filter will match any state, or false if
+        this is the empty filter"""
+        if self.include_others:
+            return True
+        return bool(self.types)
+
 
 _ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True)
 _ALL_NON_MEMBER_STATE_FILTER = StateFilter(
diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py
new file mode 100644
index 0000000000..fae5449bcc
--- /dev/null
+++ b/synapse/types/storage/__init__.py
@@ -0,0 +1,47 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+
+
+class _BackgroundUpdates:
+    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
+    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
+    POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2"
+    INDEX_STREAM_ORDERING2 = "index_stream_ordering2"
+    INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url"
+    INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order"
+    INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream"
+    INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts"
+    REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column"
+
+    EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows"
+    EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index"
+
+    EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections"
+
+    EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index"
+
+    SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE = (
+        "sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update"
+    )
+    SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE = "sliding_sync_joined_rooms_bg_update"
+    SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = (
+        "sliding_sync_membership_snapshots_bg_update"
+    )
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 70139beef2..8618bb0651 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -885,3 +885,46 @@ class AwakenableSleeper:
             # Cancel the sleep if we were woken up
             if call.active():
                 call.cancel()
+
+
+class DeferredEvent:
+    """Like threading.Event but for async code"""
+
+    def __init__(self, reactor: IReactorTime) -> None:
+        self._reactor = reactor
+        self._deferred: "defer.Deferred[None]" = defer.Deferred()
+
+    def set(self) -> None:
+        if not self._deferred.called:
+            self._deferred.callback(None)
+
+    def clear(self) -> None:
+        if self._deferred.called:
+            self._deferred = defer.Deferred()
+
+    def is_set(self) -> bool:
+        return self._deferred.called
+
+    async def wait(self, timeout_seconds: float) -> bool:
+        if self.is_set():
+            return True
+
+        # Create a deferred that gets called in N seconds
+        sleep_deferred: "defer.Deferred[None]" = defer.Deferred()
+        call = self._reactor.callLater(timeout_seconds, sleep_deferred.callback, None)
+
+        try:
+            await make_deferred_yieldable(
+                defer.DeferredList(
+                    [sleep_deferred, self._deferred],
+                    fireOnOneCallback=True,
+                    fireOnOneErrback=True,
+                    consumeErrors=True,
+                )
+            )
+        finally:
+            # Cancel the sleep if we were woken up
+            if call.active():
+                call.cancel()
+
+        return self.is_set()
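+
+
+# Illustrative usage (hypothetical wiring; `reactor` is the Twisted reactor):
+#     event = DeferredEvent(reactor)
+#     # one task waits, with a timeout, for the event to be set...
+#     woke = await event.wait(timeout_seconds=30)  # True if set() was called
+#     # ...while another task wakes it up
+#     event.set()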
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 91c335f85b..03503abe0f 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -142,9 +142,9 @@ class StreamChangeCache:
         """
         assert isinstance(stream_pos, int)
 
-        # _cache is not valid at or before the earliest known stream position, so
+        # _cache is not valid before the earliest known stream position, so
         # return that the entity has changed.
-        if stream_pos <= self._earliest_known_stream_pos:
+        if stream_pos < self._earliest_known_stream_pos:
             self.metrics.inc_misses()
             return True
 
@@ -186,7 +186,7 @@ class StreamChangeCache:
             This will be all entities if the given stream position is at or earlier
             than the earliest known stream position.
         """
-        if not self._cache or stream_pos <= self._earliest_known_stream_pos:
+        if not self._cache or stream_pos < self._earliest_known_stream_pos:
             self.metrics.inc_misses()
             return set(entities)
 
@@ -238,9 +238,9 @@ class StreamChangeCache:
         """
         assert isinstance(stream_pos, int)
 
-        # _cache is not valid at or before the earliest known stream position, so
+        # _cache is not valid before the earliest known stream position, so
         # return that an entity has changed.
-        if stream_pos <= self._earliest_known_stream_pos:
+        if stream_pos < self._earliest_known_stream_pos:
             self.metrics.inc_misses()
             return True
 
@@ -270,9 +270,9 @@ class StreamChangeCache:
         """
         assert isinstance(stream_pos, int)
 
-        # _cache is not valid at or before the earliest known stream position, so
+        # _cache is not valid before the earliest known stream position, so
         # return None to mark that it is unknown if an entity has changed.
-        if stream_pos <= self._earliest_known_stream_pos:
+        if stream_pos < self._earliest_known_stream_pos:
             return AllEntitiesChangedResult(None)
 
         changed_entities: List[EntityType] = []
@@ -327,7 +327,7 @@ class StreamChangeCache:
             for entity in r:
                 self._entity_to_key.pop(entity, None)
 
-    def get_max_pos_of_last_change(self, entity: EntityType) -> int:
+    def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]:
         """Returns an upper bound of the stream id of the last change to an
         entity.
 
@@ -335,7 +335,11 @@ class StreamChangeCache:
             entity: The entity to check.
 
         Return:
-            The stream position of the latest change for the given entity or
-            the earliest known stream position if the entitiy is unknown.
+            The stream position of the latest change for the given entity, if
+            known
         """
-        return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
+        return self._entity_to_key.get(entity)
+
+    def get_earliest_known_position(self) -> int:
+        """Returns the earliest position in the cache."""
+        return self._earliest_known_stream_pos
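+
+    # Illustrative caller pattern (hypothetical): since the per-entity position
+    # may now be unknown, callers can fall back to the earliest known position:
+    #     pos = cache.get_max_pos_of_last_change(entity)
+    #     if pos is None:
+    #         pos = cache.get_earliest_known_position()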
diff --git a/synapse/util/events.py b/synapse/util/events.py
new file mode 100644
index 0000000000..ad9b946578
--- /dev/null
+++ b/synapse/util/events.py
@@ -0,0 +1,29 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+#
+
+from synapse.util.stringutils import random_string
+
+
+def generate_fake_event_id() -> str:
+    """
+    Generate an event ID from random ASCII characters.
+
+    This is primarily useful for generating fake event IDs in response to
+    requests from shadow-banned users.
+
+    Returns:
+        A string intended to look like an event ID, but with no actual meaning.
+    """
+    return "$" + random_string(43)
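+
+
+# Illustrative output shape (value is random, shown here hypothetically):
+#     generate_fake_event_id()  # -> "$" followed by 43 random ASCII characters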
diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py
index e9a5fff211..87f801c0cf 100644
--- a/synapse/util/linked_list.py
+++ b/synapse/util/linked_list.py
@@ -19,8 +19,7 @@
 #
 #
 
-"""A circular doubly linked list implementation.
-"""
+"""A circular doubly linked list implementation."""
 
 import threading
 from typing import Generic, Optional, Type, TypeVar
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 517e79ce5f..020618598c 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -110,7 +110,7 @@ def measure_func(
     """
 
     def wrapper(
-        func: Callable[Concatenate[HasClock, P], Awaitable[R]]
+        func: Callable[Concatenate[HasClock, P], Awaitable[R]],
     ) -> Callable[P, Awaitable[R]]:
         block_name = func.__name__ if name is None else name
 
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 46dad32156..56bdf451da 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -50,7 +50,7 @@ def do_patch() -> None:
         return
 
     def new_inline_callbacks(
-        f: Callable[P, Generator["Deferred[object]", object, T]]
+        f: Callable[P, Generator["Deferred[object]", object, T]],
     ) -> Callable[P, "Deferred[T]"]:
         @functools.wraps(f)
         def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]":
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 8ead72bb7a..3f067b792c 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -103,7 +103,7 @@ _rate_limiter_instances_lock = threading.Lock()
 
 
 def _get_counts_from_rate_limiter_instance(
-    count_func: Callable[["FederationRateLimiter"], int]
+    count_func: Callable[["FederationRateLimiter"], int],
 ) -> Mapping[Tuple[str, ...], int]:
     """Returns a count of something (slept/rejected hosts) by (metrics_name)"""
     # Cast to a list to prevent it changing while the Prometheus
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 128413c8aa..3a2782bade 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -135,9 +135,9 @@ async def filter_events_for_client(
         retention_policies: Dict[str, RetentionPolicy] = {}
 
         for room_id in room_ids:
-            retention_policies[room_id] = (
-                await storage.main.get_retention_policy_for_room(room_id)
-            )
+            retention_policies[
+                room_id
+            ] = await storage.main.get_retention_policy_for_room(room_id)
 
     def allowed(event: EventBase) -> Optional[EventBase]:
         state_after_event = event_id_to_state.get(event.event_id)
diff --git a/synmark/__init__.py b/synmark/__init__.py
index 8c47e50c7c..887fec2f96 100644
--- a/synmark/__init__.py
+++ b/synmark/__init__.py
@@ -27,7 +27,9 @@ from synapse.types import ISynapseReactor
 try:
     from twisted.internet.epollreactor import EPollReactor as Reactor
 except ImportError:
-    from twisted.internet.pollreactor import PollReactor as Reactor  # type: ignore[assignment]
+    from twisted.internet.pollreactor import (  # type: ignore[assignment]
+        PollReactor as Reactor,
+    )
 from twisted.internet.main import installReactor
 
 
diff --git a/synmark/__main__.py b/synmark/__main__.py
index cac57cf111..4944c2f3b0 100644
--- a/synmark/__main__.py
+++ b/synmark/__main__.py
@@ -40,7 +40,7 @@ T = TypeVar("T")
 
 
 def make_test(
-    main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]]
+    main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]],
 ) -> Callable[[int], float]:
     """
     Take a benchmark function and wrap it in a reactor start and stop.
@@ -90,6 +90,10 @@ if __name__ == "__main__":
 
     if runner.args.worker:
         if runner.args.log:
+            # sys.__stdout__ can technically be None, just exit if it's the case
+            if not sys.__stdout__:
+                exit(1)
+
             globalLogBeginner.beginLoggingTo(
                 [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False
             )
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index a1c7ccdd0b..730b00a9fb 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -150,7 +150,8 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.assertEqual(1, len(self.txnctrl.recoverers))  # and stored
         self.assertEqual(0, txn.complete.call_count)  # txn not completed
         self.store.set_appservice_state.assert_called_once_with(
-            service, ApplicationServiceState.DOWN  # service marked as down
+            service,
+            ApplicationServiceState.DOWN,  # service marked as down
         )
 
 
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 479d2aab91..c5dee06af5 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -19,13 +19,23 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
+import tempfile
+from typing import Callable
+
 import yaml
+from parameterized import parameterized
 
 from synapse.config import ConfigError
+from synapse.config._base import RootConfig
 from synapse.config.homeserver import HomeServerConfig
 
 from tests.config.utils import ConfigFileTestCase
 
+try:
+    import hiredis
+except ImportError:
+    hiredis = None  # type: ignore
+
 
 class ConfigLoadingFileTestCase(ConfigFileTestCase):
     def test_load_fails_if_server_name_missing(self) -> None:
@@ -116,3 +126,49 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase):
         self.add_lines_to_config(["trust_identity_server_for_password_resets: true"])
         with self.assertRaises(ConfigError):
             HomeServerConfig.load_config("", ["-c", self.config_file])
+
+    @parameterized.expand(
+        [
+            "turn_shared_secret_path: /does/not/exist",
+            "registration_shared_secret_path: /does/not/exist",
+            # The redis case is only included when hiredis is installed (the
+            # multiplier below is 0 or 1).
+            *["redis:\n  enabled: true\n  password_path: /does/not/exist"]
+            * (hiredis is not None),
+        ]
+    )
+    def test_secret_files_missing(self, config_str: str) -> None:
+        self.generate_config()
+        self.add_lines_to_config(["", config_str])
+
+        with self.assertRaises(ConfigError):
+            HomeServerConfig.load_config("", ["-c", self.config_file])
+
+    @parameterized.expand(
+        [
+            (
+                "turn_shared_secret_path: {}",
+                lambda c: c.voip.turn_shared_secret,
+            ),
+            (
+                "registration_shared_secret_path: {}",
+                lambda c: c.registration.registration_shared_secret,
+            ),
+            *[
+                (
+                    "redis:\n  enabled: true\n  password_path: {}",
+                    lambda c: c.redis.redis_password,
+                )
+            ]
+            * (hiredis is not None),
+        ]
+    )
+    def test_secret_files_existing(
+        self, config_line: str, get_secret: Callable[[RootConfig], str]
+    ) -> None:
+        self.generate_config_and_remove_lines_containing("registration_shared_secret")
+        with tempfile.NamedTemporaryFile(buffering=0) as secret_file:
+            secret_file.write(b"53C237")
+
+            self.add_lines_to_config(["", config_line.format(secret_file.name)])
+            config = HomeServerConfig.load_config("", ["-c", self.config_file])
+
+            self.assertEqual(get_secret(config), "53C237")
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 30f8787758..654e6521a2 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -756,7 +756,8 @@ class SerializeEventTestCase(stdlib_unittest.TestCase):
     def test_event_fields_fail_if_fields_not_str(self) -> None:
         with self.assertRaises(TypeError):
             self.serialize(
-                MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4]  # type: ignore[list-item]
+                MockEvent(room_id="!foo:bar", content={"foo": "bar"}),
+                ["room_id", 4],  # type: ignore[list-item]
             )
 
 
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 9bd97e5d4e..87b9ffc0c6 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -158,7 +158,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
         async def get_current_state_event_counts(room_id: str) -> int:
             return 600
 
-        self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts  # type: ignore[method-assign]
+        self.hs.get_datastores().main.get_current_state_event_counts = (  # type: ignore[method-assign]
+            get_current_state_event_counts
+        )
 
         d = handler._remote_join(
             create_requester(u1),
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 08214b0013..1e1ed8e642 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -401,7 +401,10 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
         now = self.clock.time_msec()
         self.get_success(
             self.hs.get_datastores().main.set_destination_retry_timings(
-                "zzzerver", now, now, 24 * 60 * 60 * 1000  # retry in 1 day
+                "zzzerver",
+                now,
+                now,
+                24 * 60 * 60 * 1000,  # retry in 1 day
             )
         )
 
diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
index 142f73cfdb..e66aae499b 100644
--- a/tests/federation/test_federation_media.py
+++ b/tests/federation/test_federation_media.py
@@ -35,11 +35,11 @@ from synapse.types import UserID
 from synapse.util import Clock
 
 from tests import unittest
+from tests.media.test_media_storage import small_png
 from tests.test_utils import SMALL_PNG
 
 
 class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
-
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         super().prepare(reactor, clock, hs)
         self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
@@ -146,3 +146,111 @@ class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
         # check that the png file exists and matches what was uploaded
         found_file = any(SMALL_PNG in field for field in stripped_bytes)
         self.assertTrue(found_file)
+
+
+class FederationThumbnailTest(unittest.FederatingHomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        super().prepare(reactor, clock, hs)
+        self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
+        self.addCleanup(shutil.rmtree, self.test_dir)
+        self.primary_base_path = os.path.join(self.test_dir, "primary")
+        self.secondary_base_path = os.path.join(self.test_dir, "secondary")
+
+        hs.config.media.media_store_path = self.primary_base_path
+
+        storage_providers = [
+            StorageProviderWrapper(
+                FileStorageProviderBackend(hs, self.secondary_base_path),
+                store_local=True,
+                store_remote=False,
+                store_synchronous=True,
+            )
+        ]
+
+        self.filepaths = MediaFilePaths(self.primary_base_path)
+        self.media_storage = MediaStorage(
+            hs, self.primary_base_path, self.filepaths, storage_providers
+        )
+        self.media_repo = hs.get_media_repository()
+
+    def test_thumbnail_download_scaled(self) -> None:
+        content = io.BytesIO(small_png.data)
+        content_uri = self.get_success(
+            self.media_repo.create_content(
+                "image/png",
+                "test_png_thumbnail",
+                content,
+                67,
+                UserID.from_string("@user_id:whatever.org"),
+            )
+        )
+        # test with an image file
+        channel = self.make_signed_federation_request(
+            "GET",
+            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=scale",
+        )
+        self.pump()
+        self.assertEqual(200, channel.code)
+
+        content_type = channel.headers.getRawHeaders("content-type")
+        assert content_type is not None
+        assert "multipart/mixed" in content_type[0]
+        assert "boundary" in content_type[0]
+
+        # extract boundary
+        boundary = content_type[0].split("boundary=")[1]
+        # split on boundary and check that json field and expected value exist
+        body = channel.result.get("body")
+        assert body is not None
+        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+        found_json = any(
+            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
+            for field in stripped_bytes
+        )
+        self.assertTrue(found_json)
+
+        # check that the png file exists and matches the expected scaled bytes
+        found_file = any(small_png.expected_scaled in field for field in stripped_bytes)
+        self.assertTrue(found_file)
+
+    def test_thumbnail_download_cropped(self) -> None:
+        content = io.BytesIO(small_png.data)
+        content_uri = self.get_success(
+            self.media_repo.create_content(
+                "image/png",
+                "test_png_thumbnail",
+                content,
+                67,
+                UserID.from_string("@user_id:whatever.org"),
+            )
+        )
+        # test with an image file
+        channel = self.make_signed_federation_request(
+            "GET",
+            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=crop",
+        )
+        self.pump()
+        self.assertEqual(200, channel.code)
+
+        content_type = channel.headers.getRawHeaders("content-type")
+        assert content_type is not None
+        assert "multipart/mixed" in content_type[0]
+        assert "boundary" in content_type[0]
+
+        # extract boundary
+        boundary = content_type[0].split("boundary=")[1]
+        # split on boundary and check that json field and expected value exist
+        body = channel.result.get("body")
+        assert body is not None
+        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
+        found_json = any(
+            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
+            for field in stripped_bytes
+        )
+        self.assertTrue(found_json)
+
+        # check that the png file exists and matches the expected cropped bytes
+        found_file = any(
+            small_png.expected_cropped in field for field in stripped_bytes
+        )
+        self.assertTrue(found_file)
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 0e6352ff4b..8a3dfdcf75 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -43,9 +43,7 @@ from tests.unittest import override_config
 class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.appservice_api = mock.AsyncMock()
-        return self.setup_test_homeserver(
-            federation_client=mock.Mock(), application_service_api=self.appservice_api
-        )
+        return self.setup_test_homeserver(application_service_api=self.appservice_api)
 
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.handler = hs.get_e2e_keys_handler()
@@ -1224,6 +1222,61 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             },
         )
 
+    def test_query_devices_remote_down(self) -> None:
+        """Tests that querying keys for a remote user on an unreachable server returns
+        results in the "failures" property
+        """
+
+        remote_user_id = "@test:other"
+        local_user_id = "@test:test"
+
+        # The backoff code treats time zero as special
+        self.reactor.advance(5)
+
+        self.hs.get_federation_http_client().agent.request = mock.AsyncMock(  # type: ignore[method-assign]
+            side_effect=Exception("boop")
+        )
+
+        e2e_handler = self.hs.get_e2e_keys_handler()
+
+        query_result = self.get_success(
+            e2e_handler.query_devices(
+                {
+                    "device_keys": {remote_user_id: []},
+                },
+                timeout=10,
+                from_user_id=local_user_id,
+                from_device_id="some_device_id",
+            )
+        )
+
+        self.assertEqual(
+            query_result["failures"],
+            {
+                "other": {
+                    "message": "Failed to send request: Exception: boop",
+                    "status": 503,
+                }
+            },
+        )
+
+        # Do it again: we should hit the backoff
+        query_result = self.get_success(
+            e2e_handler.query_devices(
+                {
+                    "device_keys": {remote_user_id: []},
+                },
+                timeout=10,
+                from_user_id=local_user_id,
+                from_device_id="some_device_id",
+            )
+        )
+
+        self.assertEqual(
+            query_result["failures"],
+            {"other": {"message": "Not ready for retry", "status": 503}},
+        )
+
     @parameterized.expand(
         [
             # The remote homeserver's response indicates that this user has 0/1/2 devices.
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 3fe5b0a1b4..9847893fce 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -44,7 +44,7 @@ from synapse.rest.client import login, room
 from synapse.server import HomeServer
 from synapse.storage.databases.main.events_worker import EventCacheEntry
 from synapse.util import Clock
-from synapse.util.stringutils import random_string
+from synapse.util.events import generate_fake_event_id
 
 from tests import unittest
 from tests.test_utils import event_injection
@@ -52,10 +52,6 @@ from tests.test_utils import event_injection
 logger = logging.getLogger(__name__)
 
 
-def generate_fake_event_id() -> str:
-    return "$fake_" + random_string(43)
-
-
 class FederationTestCase(unittest.FederatingHomeserverTestCase):
     servlets = [
         admin.register_servlets,
diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py
index 1b83aea579..5db10fa74c 100644
--- a/tests/handlers/test_federation_event.py
+++ b/tests/handlers/test_federation_event.py
@@ -288,13 +288,15 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
         }
 
         # We also expect an outbound request to /state
-        self.mock_federation_transport_client.get_room_state.return_value = StateRequestResponse(
-            # Mimic the other server not knowing about the state at all.
-            # We want to cause Synapse to throw an error (`Unable to get
-            # missing prev_event $fake_prev_event`) and fail to backfill
-            # the pulled event.
-            auth_events=[],
-            state=[],
+        self.mock_federation_transport_client.get_room_state.return_value = (
+            StateRequestResponse(
+                # Mimic the other server not knowing about the state at all.
+                # We want to cause Synapse to throw an error (`Unable to get
+                # missing prev_event $fake_prev_event`) and fail to backfill
+                # the pulled event.
+                auth_events=[],
+                state=[],
+            )
         )
 
         pulled_event = make_event_from_dict(
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 036c539db2..5b5dc713d1 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -550,7 +550,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
             access_token="mockAccessToken",
         )
 
-        self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body)
+        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)
 
     def expect_unauthorized(
         self, method: str, path: str, content: Union[bytes, str, JsonDict] = ""
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index cc630d606c..598d6c13cd 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -1107,7 +1107,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
                 ),
             ]
         ],
-        name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}",
+        name_func=lambda testcase_func,
+        param_num,
+        params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}",
     )
     @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
     def test_set_presence_from_syncing_multi_device(
@@ -1343,7 +1345,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
                 ),
             ]
         ],
-        name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}",
+        name_func=lambda testcase_func,
+        param_num,
+        params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}",
     )
     @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
     def test_set_presence_from_non_syncing_multi_device(
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 213a66ed1a..ad77356ede 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -6,7 +6,7 @@ import synapse.rest.admin
 import synapse.rest.client.login
 import synapse.rest.client.room
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import LimitExceededError, SynapseError
+from synapse.api.errors import Codes, LimitExceededError, SynapseError
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import FrozenEventV3
 from synapse.federation.federation_client import SendJoinResult
@@ -380,9 +380,29 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
         )
 
     def test_forget_when_not_left(self) -> None:
-        """Tests that a user cannot not forgets a room that has not left."""
+        """Tests that a user cannot forget a room that they are still in."""
         self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError)
 
+    def test_nonlocal_room_user_action(self) -> None:
+        """
+        Test that non-local user ids cannot perform room actions through
+        this homeserver.
+        """
+        alien_user_id = UserID.from_string("@cheeky_monkey:matrix.org")
+        bad_room_id = f"{self.room_id}+BAD_ID"
+
+        exc = self.get_failure(
+            self.handler.update_membership(
+                create_requester(self.alice),
+                alien_user_id,
+                bad_room_id,
+                "unban",
+            ),
+            SynapseError,
+        ).value
+
+        self.assertEqual(exc.errcode, Codes.BAD_JSON)
+
     def test_rejoin_forgotten_by_user(self) -> None:
         """Test that a user that has forgotten a room can do a re-join.
         The room was not forgotten from the local server.
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index 244a4e7689..b55fa1a8fd 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -757,6 +757,54 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
             )
         self._assert_hierarchy(result, expected)
 
+    def test_fed_root(self) -> None:
+        """
+        Test if requested room is available over federation.
+        """
+        fed_hostname = self.hs.hostname + "2"
+        fed_space = "#fed_space:" + fed_hostname
+        fed_subroom = "#fed_sub_room:" + fed_hostname
+
+        requested_room_entry = _RoomEntry(
+            fed_space,
+            {
+                "room_id": fed_space,
+                "world_readable": True,
+                "room_type": RoomTypes.SPACE,
+            },
+            [
+                {
+                    "type": EventTypes.SpaceChild,
+                    "room_id": fed_space,
+                    "state_key": fed_subroom,
+                    "content": {"via": [fed_hostname]},
+                }
+            ],
+        )
+        child_room = {
+            "room_id": fed_subroom,
+            "world_readable": True,
+        }
+
+        async def summarize_remote_room_hierarchy(
+            _self: Any, room: Any, suggested_only: bool
+        ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
+            return requested_room_entry, {fed_subroom: child_room}, set()
+
+        expected = [
+            (fed_space, [fed_subroom]),
+            (fed_subroom, ()),
+        ]
+
+        with mock.patch(
+            "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
+            new=summarize_remote_room_hierarchy,
+        ):
+            result = self.get_success(
+                self.handler.get_room_hierarchy(create_requester(self.user), fed_space)
+            )
+        self._assert_hierarchy(result, expected)
+
     def test_fed_filtering(self) -> None:
         """
         Rooms returned over federation should be properly filtered to only include
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 5f83b637c5..9a68d1dd95 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -18,32 +18,36 @@
 #
 #
 import logging
-from copy import deepcopy
-from typing import Optional
+from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple
 from unittest.mock import patch
 
+import attr
 from parameterized import parameterized
 
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.api.constants import (
-    AccountDataTypes,
-    EventContentFields,
     EventTypes,
     JoinRules,
     Membership,
-    RoomTypes,
 )
 from synapse.api.room_versions import RoomVersions
-from synapse.handlers.sliding_sync import RoomSyncConfig, StateValues
+from synapse.handlers.sliding_sync import (
+    RoomsForUserType,
+    RoomSyncConfig,
+    StateValues,
+    _required_state_changes,
+)
 from synapse.rest import admin
 from synapse.rest.client import knock, login, room
 from synapse.server import HomeServer
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
-from synapse.types import JsonDict, UserID
-from synapse.types.handlers import SlidingSyncConfig
+from synapse.types import JsonDict, StateMap, StreamToken, UserID
+from synapse.types.handlers.sliding_sync import SlidingSyncConfig
+from synapse.types.state import StateFilter
 from synapse.util import Clock
 
+from tests import unittest
 from tests.replication._base import BaseMultiWorkerStreamTestCase
 from tests.unittest import HomeserverTestCase, TestCase
 
@@ -560,28 +564,16 @@ class RoomSyncConfigTestCase(TestCase):
         """
         Combine A into B and B into A to make sure we get the same result.
         """
-        # Since we're mutating these in place, make a copy for each of our trials
-        room_sync_config_a = deepcopy(a)
-        room_sync_config_b = deepcopy(b)
+        combined_config = a.combine_room_sync_config(b)
+        self._assert_room_config_equal(combined_config, expected, "B into A")
 
-        # Combine B into A
-        room_sync_config_a.combine_room_sync_config(room_sync_config_b)
-
-        self._assert_room_config_equal(room_sync_config_a, expected, "B into A")
-
-        # Since we're mutating these in place, make a copy for each of our trials
-        room_sync_config_a = deepcopy(a)
-        room_sync_config_b = deepcopy(b)
-
-        # Combine A into B
-        room_sync_config_b.combine_room_sync_config(room_sync_config_a)
-
-        self._assert_room_config_equal(room_sync_config_b, expected, "A into B")
+        combined_config = b.combine_room_sync_config(a)
+        self._assert_room_config_equal(combined_config, expected, "A into B")
 
 
-class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
+class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
     """
-    Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it returns
+    Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns
     the correct list of room IDs.
     """
 
@@ -613,8 +605,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         now_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, _, _ = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=now_token,
                 to_token=now_token,
@@ -640,8 +632,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_room_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room_token,
                 to_token=after_room_token,
@@ -655,9 +647,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[room_id].event_id,
             join_response["event_id"],
         )
+        self.assertEqual(room_id_results[room_id].membership, Membership.JOIN)
         # We should be considered `newly_joined` because we joined during the token
         # range
-        self.assertEqual(room_id_results[room_id].newly_joined, True)
+        self.assertTrue(room_id in newly_joined)
+        self.assertTrue(room_id not in newly_left)
 
     def test_get_already_joined_room(self) -> None:
         """
@@ -673,8 +667,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_room_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room_token,
                 to_token=after_room_token,
@@ -688,8 +682,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[room_id].event_id,
             join_response["event_id"],
         )
+        self.assertEqual(room_id_results[room_id].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id].newly_joined, False)
+        self.assertTrue(room_id not in newly_joined)
+        self.assertTrue(room_id not in newly_left)
 
     def test_get_invited_banned_knocked_room(self) -> None:
         """
@@ -745,8 +741,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_room_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room_token,
                 to_token=after_room_token,
@@ -768,19 +764,25 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[invited_room_id].event_id,
             invite_response["event_id"],
         )
+        self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE)
+        self.assertTrue(invited_room_id not in newly_joined)
+        self.assertTrue(invited_room_id not in newly_left)
+
         self.assertEqual(
             room_id_results[ban_room_id].event_id,
             ban_response["event_id"],
         )
+        self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN)
+        self.assertTrue(ban_room_id not in newly_joined)
+        self.assertTrue(ban_room_id not in newly_left)
+
         self.assertEqual(
             room_id_results[knock_room_id].event_id,
             knock_room_membership_state_event.event_id,
         )
-        # We should *NOT* be `newly_joined` because we were not joined at the the time
-        # of the `to_token`.
-        self.assertEqual(room_id_results[invited_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[ban_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[knock_room_id].newly_joined, False)
+        self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
+        self.assertTrue(knock_room_id not in newly_joined)
+        self.assertTrue(knock_room_id not in newly_left)
 
     def test_get_kicked_room(self) -> None:
         """
@@ -811,8 +813,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_kick_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_kick_token,
                 to_token=after_kick_token,
@@ -826,9 +828,12 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[kick_room_id].event_id,
             kick_response["event_id"],
         )
+        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
         # We should *NOT* be `newly_joined` because we were not joined at the the time
         # of the `to_token`.
-        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
+        self.assertTrue(kick_room_id not in newly_joined)
+        self.assertTrue(kick_room_id not in newly_left)
 
     def test_forgotten_rooms(self) -> None:
         """
@@ -901,8 +906,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
         self.assertEqual(channel.code, 200, channel.result)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room_forgets,
                 to_token=before_room_forgets,
@@ -912,52 +917,58 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # We shouldn't see the room because it was forgotten
         self.assertEqual(room_id_results.keys(), set())
 
-    def test_only_newly_left_rooms_show_up(self) -> None:
+    def test_newly_left_rooms(self) -> None:
         """
-        Test that newly_left rooms still show up in the sync response but rooms that
-        were left before the `from_token` don't show up. See condition "2)" comments in
-        the `get_sync_room_ids_for_user` method.
+        Test that newly_left rooms are marked properly.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
 
         # Leave before we calculate the `from_token`
         room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Leave during the from_token/to_token range (newly_left)
         room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
-        _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
 
         after_room2_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room2_token,
             )
         )
 
-        # Only the newly_left room should show up
-        self.assertEqual(room_id_results.keys(), {room_id2})
-        # It should be pointing to the latest membership event in the from/to range but
-        # the `event_id` is `None` because we left the room causing the server to leave
-        # the room because no other local users are in it (quirk of the
-        # `current_state_delta_stream` table that we source things from)
+        self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response1["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` or `newly_left` because that happened before
+        # the from/to range
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
+
         self.assertEqual(
             room_id_results[room_id2].event_id,
-            None,  # _leave_response2["event_id"],
+            leave_response2["event_id"],
         )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
         # We should *NOT* be `newly_joined` because we are instead `newly_left`
-        self.assertEqual(room_id_results[room_id2].newly_joined, False)
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 in newly_left)
 
     def test_no_joins_after_to_token(self) -> None:
         """
         Rooms we join after the `to_token` should *not* show up. See condition "1b)"
-        comments in the `get_sync_room_ids_for_user()` method.
+        comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -975,8 +986,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
         self.helper.join(room_id2, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -989,14 +1000,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[room_id1].event_id,
             join_response1["event_id"],
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_join_during_range_and_left_room_after_to_token(self) -> None:
         """
         Room still shows up if we left the room but were joined during the
         from_token/to_token. See condition "1a)" comments in the
-        `get_sync_room_ids_for_user()` method.
+        `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1013,8 +1026,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Leave the room after we already have our tokens
         leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1036,14 +1049,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_join_before_range_and_left_room_after_to_token(self) -> None:
         """
         Room still shows up if we left the room but were joined before the `from_token`
         so it should show up. See condition "1a)" comments in the
-        `get_sync_room_ids_for_user()` method.
+        `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1058,8 +1073,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Leave the room after we already have our tokens
         leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
@@ -1080,14 +1095,16 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_kicked_before_range_and_left_after_to_token(self) -> None:
         """
         Room still shows up if we left the room but were kicked before the `from_token`
         so it should show up. See condition "1a)" comments in the
-        `get_sync_room_ids_for_user()` method.
+        `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1120,8 +1137,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok)
         leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_kick_token,
                 to_token=after_kick_token,
@@ -1144,14 +1161,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
         # We should *NOT* be `newly_joined` because we were kicked
-        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
+        self.assertTrue(kick_room_id not in newly_joined)
+        self.assertTrue(kick_room_id not in newly_left)
 
     def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None:
         """
         Newly left room should show up. But we're also testing that joining and leaving
         after the `to_token` doesn't mess with the results. See condition "2)" and "1a)"
-        comments in the `get_sync_room_ids_for_user()` method.
+        comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1173,8 +1193,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1197,14 +1217,17 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
-        # We should *NOT* be `newly_joined` because we left during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` because we are actually `newly_left` during
+        # the token range
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 in newly_left)
 
     def test_newly_left_during_range_and_join_after_to_token(self) -> None:
         """
         Newly left room should show up. But we're also testing that joining after the
         `to_token` doesn't mess with the results. See condition "2)" and "1b)" comments
-        in the `get_sync_room_ids_for_user()` method.
+        in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1225,8 +1248,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Join the room after we already have our tokens
         join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1248,16 +1271,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
-        # We should *NOT* be `newly_joined` because we left during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` because we are actually `newly_left` during
+        # the token range
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 in newly_left)
 
     def test_no_from_token(self) -> None:
         """
-        Test that if we don't provide a `from_token`, we get all the rooms that we we're
-        joined up to the `to_token`.
+        Test that if we don't provide a `from_token`, we get all the rooms that we had
+        membership in up to the `to_token`.
 
-        Providing `from_token` only really has the effect that it adds `newly_left`
-        rooms to the response.
+        Providing `from_token` only really has the effect that it marks rooms as
+        `newly_left` in the response.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1274,15 +1300,15 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Join and leave the room2 before the `to_token`
         self.helper.join(room_id2, user1_id, tok=user1_tok)
-        self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Join the room2 after we already have our tokens
         self.helper.join(room_id2, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=None,
                 to_token=after_room1_token,
@@ -1290,15 +1316,31 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         )
 
         # Only rooms we were joined to before the `to_token` should show up
-        self.assertEqual(room_id_results.keys(), {room_id1})
+        self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+
+        # Room1
         # It should be pointing to the latest membership event in the from/to range
         self.assertEqual(
             room_id_results[room_id1].event_id,
             join_response1["event_id"],
         )
-        # We should *NOT* be `newly_joined` because there is no `from_token` to
-        # define a "live" range to compare against
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+        # We should *NOT* be `newly_joined`/`newly_left` because there is no
+        # `from_token` to define a "live" range to compare against
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
+
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            leave_response2["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because there is no
+        # `from_token` to define a "live" range to compare against
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 not in newly_left)
 
     def test_from_token_ahead_of_to_token(self) -> None:
         """
@@ -1317,28 +1359,28 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
 
-        # Join room1 before `before_room_token`
-        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # Join room1 before `to_token`
+        join_room1_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
-        # Join and leave the room2 before `before_room_token`
-        self.helper.join(room_id2, user1_id, tok=user1_tok)
-        self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        # Join and leave the room2 before `to_token`
+        _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+        leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
 
         # Note: These are purposely swapped. The `from_token` should come after
         # the `to_token` in this test
         to_token = self.event_sources.get_current_token()
 
-        # Join room2 after `before_room_token`
-        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        # Join room2 after `to_token`
+        _join_room2_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
 
         # --------
 
-        # Join room3 after `before_room_token`
-        self.helper.join(room_id3, user1_id, tok=user1_tok)
+        # Join room3 after `to_token`
+        _join_room3_response1 = self.helper.join(room_id3, user1_id, tok=user1_tok)
 
-        # Join and leave the room4 after `before_room_token`
-        self.helper.join(room_id4, user1_id, tok=user1_tok)
-        self.helper.leave(room_id4, user1_id, tok=user1_tok)
+        # Join and leave the room4 after `to_token`
+        _join_room4_response1 = self.helper.join(room_id4, user1_id, tok=user1_tok)
+        _leave_room4_response1 = self.helper.leave(room_id4, user1_id, tok=user1_tok)
 
         # Note: These are purposely swapped. The `from_token` should come after the
         # `to_token` in this test
@@ -1347,32 +1389,60 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Join the room4 after we already have our tokens
         self.helper.join(room_id4, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=from_token,
                 to_token=to_token,
             )
         )
 
-        # Only rooms we were joined to before the `to_token` should show up
-        #
-        # There won't be any newly_left rooms because the `from_token` is ahead of the
-        # `to_token` and that range will give no membership changes to check.
-        self.assertEqual(room_id_results.keys(), {room_id1})
+        # In the "current" state snapshot, we're joined to all of the rooms but in the
+        # from/to token range...
+        self.assertIncludes(
+            room_id_results.keys(),
+            {
+                # Included because we were joined before both tokens
+                room_id1,
+                # Included because we had membership before the to_token
+                room_id2,
+                # Excluded because we joined after the `to_token`
+                # room_id3,
+                # Excluded because we joined after the `to_token`
+                # room_id4,
+            },
+            exact=True,
+        )
+
+        # Room1
         # It should be pointing to the latest membership event in the from/to range
         self.assertEqual(
             room_id_results[room_id1].event_id,
-            join_response1["event_id"],
+            join_room1_response1["event_id"],
         )
-        # We should *NOT* be `newly_joined` because we joined `room1` before either of the tokens
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1`
+        # before either of the tokens
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
+
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            leave_room2_response1["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+        # `room2` before either of the tokens
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 not in newly_left)
 
     def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
         """
-        Old left room shouldn't show up. But we're also testing that joining and leaving
-        after the `to_token` doesn't mess with the results. See condition "1a)" comments
-        in the `get_sync_room_ids_for_user()` method.
+        Test old left rooms. But we're also testing that joining and leaving after the
+        `to_token` doesn't mess with the results. See condition "1a)" comments in the
+        `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1384,7 +1454,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join and leave the room before the from/to range
         self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
@@ -1392,22 +1462,31 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         self.helper.join(room_id1, user1_id, tok=user1_tok)
         self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
             )
         )
 
-        # Room shouldn't show up because it was left before the `from_token`
-        self.assertEqual(room_id_results.keys(), set())
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+        # `room1` before either of the tokens
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_leave_before_range_and_join_after_to_token(self) -> None:
         """
-        Old left room shouldn't show up. But we're also testing that joining after the
-        `to_token` doesn't mess with the results. See condition "1b)" comments in the
-        `get_sync_room_ids_for_user()` method.
+        Test old left room. But we're also testing that joining after the `to_token`
+        doesn't mess with the results. See condition "1b)" comments in the
+        `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1419,32 +1498,40 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         # Join and leave the room before the from/to range
         self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
         after_room1_token = self.event_sources.get_current_token()
 
         # Join the room after we already have our tokens
         self.helper.join(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
             )
         )
 
-        # Room shouldn't show up because it was left before the `from_token`
-        self.assertEqual(room_id_results.keys(), set())
+        self.assertEqual(room_id_results.keys(), {room_id1})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+        # `room1` before either of the tokens
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_join_leave_multiple_times_during_range_and_after_to_token(
         self,
     ) -> None:
         """
         Join and leave multiple times shouldn't affect rooms from showing up. It just
-        matters that we were joined or newly_left in the from/to range. But we're also
-        testing that joining and leaving after the `to_token` doesn't mess with the
-        results.
+        matters that we had membership in the from/to range. But we're also testing that
+        joining and leaving after the `to_token` doesn't mess with the results.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1456,7 +1543,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # We create the room with user2 so the room isn't left with no members when we
         # leave and can still re-join.
         room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
-        # Join, leave, join back to the room before the from/to range
+        # Join, leave, join back to the room during the from/to range
         join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
         join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
@@ -1468,8 +1555,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1494,15 +1581,19 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        # We should *NOT* be `newly_left` because we joined during the token range and
+        # were still joined at the end of the range
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_join_leave_multiple_times_before_range_and_after_to_token(
         self,
     ) -> None:
         """
         Join and leave multiple times before the from/to range shouldn't affect rooms
-        from showing up. It just matters that we were joined or newly_left in the
+        from showing up. It just matters that we had membership in the
         from/to range. But we're also testing that joining and leaving after the
         `to_token` doesn't mess with the results.
         """
@@ -1526,8 +1617,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
@@ -1552,8 +1643,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_invite_before_range_and_join_leave_after_to_token(
         self,
@@ -1561,7 +1654,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         """
         Make it look like we joined after the token range but we were invited before the
         from/to range so the room should still show up. See condition "1a)" comments in
-        the `get_sync_room_ids_for_user()` method.
+        the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1583,8 +1676,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         join_respsonse = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
@@ -1606,9 +1699,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE)
         # We should *NOT* be `newly_joined` because we were only invited before the
         # token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_join_and_display_name_changes_in_token_range(
         self,
@@ -1655,8 +1750,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             tok=user1_tok,
         )
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1682,8 +1777,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_display_name_changes_in_token_range(
         self,
@@ -1718,8 +1815,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_change1_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_change1_token,
@@ -1742,8 +1839,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_display_name_changes_before_and_after_token_range(
         self,
@@ -1788,8 +1887,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             tok=user1_tok,
         )
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_room1_token,
@@ -1815,8 +1914,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_display_name_changes_leave_after_token_range(
         self,
@@ -1826,7 +1927,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         if there are multiple `join` membership events in a row indicating
         `displayname`/`avatar_url` updates and we leave after the `to_token`.
 
-        See condition "1a)" comments in the `get_sync_room_ids_for_user()` method.
+        See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1868,8 +1969,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Leave after the token
         self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1895,8 +1996,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_display_name_changes_join_after_token_range(
         self,
@@ -1906,7 +2009,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         indicating `displayname`/`avatar_url` updates doesn't affect the results (we
         joined after the token range so it shouldn't show up)
 
-        See condition "1b)" comments in the `get_sync_room_ids_for_user()` method.
+        See condition "1b)" comments in the `get_room_membership_for_user_at_to_token()` method.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
@@ -1934,8 +2037,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             tok=user1_tok,
         )
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -1970,8 +2073,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_more_changes_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=after_room1_token,
                 to_token=after_more_changes_token,
@@ -1985,9 +2088,11 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
             room_id_results[room_id1].event_id,
             join_response2["event_id"],
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be considered `newly_joined` because there is some non-join event in
         # between our latest join event.
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_newly_joined_only_joins_during_token_range(
         self,
@@ -2033,8 +2138,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         after_room1_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
@@ -2060,8 +2165,10 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
                 }
             ),
         )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
         # We should be `newly_joined` because we first joined during the token range
-        self.assertEqual(room_id_results[room_id1].newly_joined, True)
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_multiple_rooms_are_not_confused(
         self,
@@ -2084,16 +2191,18 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
 
         # Invited and left the room before the token
         self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
         # Invited to room2
-        self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok)
+        invite_room2_response = self.helper.invite(
+            room_id2, src=user2_id, targ=user1_id, tok=user2_tok
+        )
 
         before_room3_token = self.event_sources.get_current_token()
 
         # Invited and left room3 during the from/to range
         room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
         self.helper.invite(room_id3, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.leave(room_id3, user1_id, tok=user1_tok)
+        leave_room3_response = self.helper.leave(room_id3, user1_id, tok=user1_tok)
 
         after_room3_token = self.event_sources.get_current_token()
 
@@ -2105,8 +2214,8 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         # Leave room3
         self.helper.leave(room_id3, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_room3_token,
                 to_token=after_room3_token,
@@ -2116,19 +2225,158 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
         self.assertEqual(
             room_id_results.keys(),
             {
-                # `room_id1` shouldn't show up because we left before the from/to range
-                #
-                # Room should show up because we were invited before the from/to range
+                # Left before the from/to range
+                room_id1,
+                # Invited before the from/to range
                 room_id2,
-                # Room should show up because it was newly_left during the from/to range
+                # `newly_left` during the from/to range
                 room_id3,
             },
         )
 
+        # Room1
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            leave_room1_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left
+        # before the token range
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
-class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            invite_room2_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE)
+        # We should *NOT* be `newly_joined`/`newly_left` because we were invited before
+        # the token range
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 not in newly_left)
+
+        # Room3
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id3].event_id,
+            leave_room3_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE)
+        # We should be `newly_left` because we were invited and left during
+        # the token range
+        self.assertTrue(room_id3 not in newly_joined)
+        self.assertTrue(room_id3 in newly_left)
+
+    def test_state_reset(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # The room where the state reset will happen
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Join another room so we don't hit the short-circuit and return early if the
+        # user has no room membership
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_events",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="local_current_membership",
+                keyvalues={
+                    "room_id": room_id1,
+                    "user_id": user1_id,
+                },
+                desc="state reset user in local_current_membership",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+
+        # Manually bust the cache since we're just manually messing with the database
+        # and not causing an actual state reset.
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, dummy_state_pos.stream
+        )
+
+        after_reset_token = self.event_sources.get_current_token()
+
+        # The function under test
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+                UserID.from_string(user1_id),
+                from_token=before_reset_token,
+                to_token=after_reset_token,
+            )
+        )
+
+        # Room1 should show up because it was `newly_left` via state reset during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+        # It should be pointing to no event because we were removed from the room
+        # without a corresponding leave event
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            None,
+        )
+        # State reset caused us to leave the room and there is no corresponding leave event
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertTrue(room_id1 not in newly_joined)
+        # We should be `newly_left` because we were removed via state reset during the from/to range
+        self.assertTrue(room_id1 in newly_left)
+
+
+class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase):
     """
-    Tests Sliding Sync handler `get_sync_room_ids_for_user()` to make sure it works with
+    Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with
     sharded event stream_writers enabled
     """
 
@@ -2187,7 +2435,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
 
         We then send some events to advance the stream positions of worker1 and worker3
         but worker2 is lagging behind because it's stuck. We are specifically testing
-        that `get_sync_room_ids_for_user(from_token=xxx, to_token=xxx)` should work
+        that `get_room_membership_for_user_at_to_token(from_token=xxx, to_token=xxx)` should work
         correctly in these adverse conditions.
         """
         user1_id = self.register_user("user1", "pass")
@@ -2226,7 +2474,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
         join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
         # Leave room2
-        self.helper.leave(room_id2, user1_id, tok=user1_tok)
+        leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok)
         join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok)
         # Leave room3
         self.helper.leave(room_id3, user1_id, tok=user1_tok)
@@ -2263,7 +2511,7 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
         # For room_id1/worker1: leave and join the room to advance the stream position
         # and generate membership changes.
         self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_room1_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
         # For room_id2/worker2: which is currently stuck, join the room.
         join_on_worker2_response = self.helper.join(room_id2, user1_id, tok=user1_tok)
         # For room_id3/worker3: leave and join the room to advance the stream position
@@ -2316,8 +2564,8 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
         self.get_success(actx.__aexit__(None, None, None))
 
         # The function under test
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
+        room_id_results, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
                 UserID.from_string(user1_id),
                 from_token=before_stuck_activity_token,
                 to_token=stuck_activity_token,
@@ -2328,23 +2576,54 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
             room_id_results.keys(),
             {
                 room_id1,
-                # room_id2 shouldn't show up because we left before the from/to range
-                # and the join event during the range happened while worker2 was stuck.
-                # This means that from the perspective of the master, where the
-                # `stuck_activity_token` is generated, the stream position for worker2
-                # wasn't advanced to the join yet. Looking at the `instance_map`, the
-                # join technically comes after `stuck_activity_token``.
-                #
-                # room_id2,
+                room_id2,
                 room_id3,
             },
         )
 
+        # Room1
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            join_room1_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+        # We should be `newly_joined` because we joined during the token range
+        self.assertTrue(room_id1 in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
-class FilterRoomsTestCase(HomeserverTestCase):
+        # Room2
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            leave_room2_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+        # room_id2 should *NOT* be considered `newly_left` because we left before the
+        # from/to range and the join event during the range happened while worker2 was
+        # stuck. This means that from the perspective of the master, where the
+        # `stuck_activity_token` is generated, the stream position for worker2 wasn't
+        # advanced to the join yet. Looking at the `instance_map`, the join technically
+        # comes after `stuck_activity_token`.
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 not in newly_left)
+
+        # Room3
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[room_id3].event_id,
+            join_on_worker3_response["event_id"],
+        )
+        self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN)
+        # We should be `newly_joined` because we joined during the token range
+        self.assertTrue(room_id3 in newly_joined)
+        self.assertTrue(room_id3 not in newly_left)
+
+
+class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
     """
-    Tests Sliding Sync handler `filter_rooms()` to make sure it includes/excludes rooms
-    correctly.
+    Tests Sliding Sync handler `filter_rooms_relevant_for_sync()` to make sure it returns
+    the correct list of room IDs.
     """
 
     servlets = [
@@ -2364,432 +2643,344 @@ class FilterRoomsTestCase(HomeserverTestCase):
         self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
         self.store = self.hs.get_datastores().main
         self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
 
-    def _create_dm_room(
+    def _get_sync_room_ids_for_user(
         self,
-        inviter_user_id: str,
-        inviter_tok: str,
-        invitee_user_id: str,
-        invitee_tok: str,
-    ) -> str:
+        user: UserID,
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]:
         """
-        Helper to create a DM room as the "inviter" and invite the "invitee" user to the room. The
-        "invitee" user also will join the room. The `m.direct` account data will be set
-        for both users.
+        Get the rooms the user should be syncing with
         """
-
-        # Create a room and send an invite the other user
-        room_id = self.helper.create_room_as(
-            inviter_user_id,
-            is_public=False,
-            tok=inviter_tok,
-        )
-        self.helper.invite(
-            room_id,
-            src=inviter_user_id,
-            targ=invitee_user_id,
-            tok=inviter_tok,
-            extra_data={"is_direct": True},
-        )
-        # Person that was invited joins the room
-        self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
-
-        # Mimic the client setting the room as a direct message in the global account
-        # data
-        self.get_success(
-            self.store.add_account_data_for_user(
-                invitee_user_id,
-                AccountDataTypes.DIRECT,
-                {inviter_user_id: [room_id]},
+        room_membership_for_user_map, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+                user=user,
+                from_token=from_token,
+                to_token=to_token,
             )
         )
-        self.get_success(
-            self.store.add_account_data_for_user(
-                inviter_user_id,
-                AccountDataTypes.DIRECT,
-                {invitee_user_id: [room_id]},
+        filtered_sync_room_map = self.get_success(
+            self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync(
+                user=user,
+                room_membership_for_user_map=room_membership_for_user_map,
+                newly_left_room_ids=newly_left,
             )
         )
 
-        return room_id
+        return filtered_sync_room_map, newly_joined, newly_left
 
-    def test_filter_dm_rooms(self) -> None:
+    def test_no_rooms(self) -> None:
         """
-        Test `filter.is_dm` for DM rooms
+        Test when the user has never joined any rooms before
+        """
+        user1_id = self.register_user("user1", "pass")
+        # user1_tok = self.login(user1_id, "pass")
+
+        now_token = self.event_sources.get_current_token()
+
+        room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=now_token,
+            to_token=now_token,
+        )
+
+        self.assertEqual(room_id_results.keys(), set())
+
+    def test_basic_rooms(self) -> None:
+        """
+        Test that rooms that the user is joined to, invited to, banned from, and knocked
+        on show up.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
         user2_id = self.register_user("user2", "pass")
         user2_tok = self.login(user2_id, "pass")
 
-        # Create a normal room
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        before_room_token = self.event_sources.get_current_token()
 
-        # Create a DM room
-        dm_room_id = self._create_dm_room(
-            inviter_user_id=user1_id,
-            inviter_tok=user1_tok,
-            invitee_user_id=user2_id,
-            invitee_tok=user2_tok,
+        join_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(join_room_id, user1_id, tok=user1_tok)
+
+        # Setup the invited room (user2 invites user1 to the room)
+        invited_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        invite_response = self.helper.invite(
+            invited_room_id, targ=user1_id, tok=user2_tok
         )
 
-        after_rooms_token = self.event_sources.get_current_token()
-
-        # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
-            )
+        # Setup the ban room (user2 bans user1 from the room)
+        ban_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, is_public=True
+        )
+        self.helper.join(ban_room_id, user1_id, tok=user1_tok)
+        ban_response = self.helper.ban(
+            ban_room_id, src=user2_id, targ=user1_id, tok=user2_tok
         )
 
-        # Try with `is_dm=True`
-        truthy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_dm=True,
-                ),
-                after_rooms_token,
-            )
+        # Setup the knock room (user1 knocks on the room)
+        knock_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, room_version=RoomVersions.V7.identifier
         )
-
-        self.assertEqual(truthy_filtered_room_map.keys(), {dm_room_id})
-
-        # Try with `is_dm=False`
-        falsy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_dm=False,
-                ),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
-
-    def test_filter_encrypted_rooms(self) -> None:
-        """
-        Test `filter.is_encrypted` for encrypted rooms
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        # Create a normal room
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        # Create an encrypted room
-        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
         self.helper.send_state(
-            encrypted_room_id,
-            EventTypes.RoomEncryption,
-            {"algorithm": "m.megolm.v1.aes-sha2"},
-            tok=user1_tok,
+            knock_room_id,
+            EventTypes.JoinRules,
+            {"join_rule": JoinRules.KNOCK},
+            tok=user2_tok,
         )
-
-        after_rooms_token = self.event_sources.get_current_token()
-
-        # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
+        # User1 knocks on the room
+        knock_channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/knock/%s" % (knock_room_id,),
+            b"{}",
+            user1_tok,
+        )
+        self.assertEqual(knock_channel.code, 200, knock_channel.result)
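+        # The knock endpoint only returns the room ID, so look up the resulting
+        # membership event from the current state instead.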
+        knock_room_membership_state_event = self.get_success(
+            self.storage_controllers.state.get_current_state_event(
+                knock_room_id, EventTypes.Member, user1_id
             )
         )
+        assert knock_room_membership_state_event is not None
 
-        # Try with `is_encrypted=True`
-        truthy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_encrypted=True,
-                ),
-                after_rooms_token,
-            )
+        after_room_token = self.event_sources.get_current_token()
+
+        room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=before_room_token,
+            to_token=after_room_token,
         )
 
-        self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id})
-
-        # Try with `is_encrypted=False`
-        falsy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_encrypted=False,
-                ),
-                after_rooms_token,
-            )
+        # Ensure that the joined, invited, ban, and knock rooms show up
+        self.assertEqual(
+            room_id_results.keys(),
+            {
+                join_room_id,
+                invited_room_id,
+                ban_room_id,
+                knock_room_id,
+            },
         )
+        # It should be pointing to the respective membership event (latest
+        # membership event in the from/to range)
+        self.assertEqual(
+            room_id_results[join_room_id].event_id,
+            join_response["event_id"],
+        )
+        self.assertEqual(room_id_results[join_room_id].membership, Membership.JOIN)
+        self.assertTrue(join_room_id in newly_joined)
+        self.assertTrue(join_room_id not in newly_left)
 
-        self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
+        self.assertEqual(
+            room_id_results[invited_room_id].event_id,
+            invite_response["event_id"],
+        )
+        self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE)
+        self.assertTrue(invited_room_id not in newly_joined)
+        self.assertTrue(invited_room_id not in newly_left)
 
-    def test_filter_invite_rooms(self) -> None:
+        self.assertEqual(
+            room_id_results[ban_room_id].event_id,
+            ban_response["event_id"],
+        )
+        self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN)
+        self.assertTrue(ban_room_id not in newly_joined)
+        self.assertTrue(ban_room_id not in newly_left)
+
+        self.assertEqual(
+            room_id_results[knock_room_id].event_id,
+            knock_room_membership_state_event.event_id,
+        )
+        self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
+        self.assertTrue(knock_room_id not in newly_joined)
+        self.assertTrue(knock_room_id not in newly_left)
+
+    def test_only_newly_left_rooms_show_up(self) -> None:
         """
-        Test `filter.is_invite` for rooms that the user has been invited to
+        Test that `newly_left` rooms still show up in the sync response but rooms that
+        were left before the `from_token` don't show up. See condition "2)" comments in
+        the `get_room_membership_for_user_at_to_token()` method.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Leave before we calculate the `from_token`
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Leave during the from_token/to_token range (newly_left)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+        after_room2_token = self.event_sources.get_current_token()
+
+        room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=after_room1_token,
+            to_token=after_room2_token,
+        )
+
+        # Only the `newly_left` room should show up
+        self.assertEqual(room_id_results.keys(), {room_id2})
+        self.assertEqual(
+            room_id_results[room_id2].event_id,
+            _leave_response2["event_id"],
+        )
+        # We should *NOT* be `newly_joined` because we are instead `newly_left`
+        self.assertTrue(room_id2 not in newly_joined)
+        self.assertTrue(room_id2 in newly_left)
+
+    def test_get_kicked_room(self) -> None:
+        """
+        Test that a room that the user was kicked from still shows up. When the user
+        comes back to their client, they should see that they were kicked.
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
         user2_id = self.register_user("user2", "pass")
         user2_tok = self.login(user2_id, "pass")
 
-        # Create a normal room
-        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id, user1_id, tok=user1_tok)
-
-        # Create a room that user1 is invited to
-        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
-
-        after_rooms_token = self.event_sources.get_current_token()
-
-        # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
-            )
+        # Setup the kick room (user2 kicks user1 from the room)
+        kick_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, is_public=True
+        )
+        self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+        # Kick user1 from the room
+        kick_response = self.helper.change_membership(
+            room=kick_room_id,
+            src=user2_id,
+            targ=user1_id,
+            tok=user2_tok,
+            membership=Membership.LEAVE,
+            extra_data={
+                "reason": "Bad manners",
+            },
         )
 
-        # Try with `is_invite=True`
-        truthy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_invite=True,
-                ),
-                after_rooms_token,
-            )
+        after_kick_token = self.event_sources.get_current_token()
+
+        room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=after_kick_token,
+            to_token=after_kick_token,
         )
 
-        self.assertEqual(truthy_filtered_room_map.keys(), {invite_room_id})
-
-        # Try with `is_invite=False`
-        falsy_filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    is_invite=False,
-                ),
-                after_rooms_token,
-            )
+        # The kicked room should show up
+        self.assertEqual(room_id_results.keys(), {kick_room_id})
+        # It should be pointing to the latest membership event in the from/to range
+        self.assertEqual(
+            room_id_results[kick_room_id].event_id,
+            kick_response["event_id"],
         )
+        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
+        # We should *NOT* be `newly_joined` because we were not joined at the time
+        # of the `to_token`.
+        self.assertTrue(kick_room_id not in newly_joined)
+        self.assertTrue(kick_room_id not in newly_left)
 
-        self.assertEqual(falsy_filtered_room_map.keys(), {room_id})
-
-    def test_filter_room_types(self) -> None:
+    def test_state_reset(self) -> None:
         """
-        Test `filter.room_types` for different room types
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
         """
         user1_id = self.register_user("user1", "pass")
         user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
 
-        # Create a normal room (no room type)
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # The room where the state reset will happen
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
 
-        # Create a space room
-        space_room_id = self.helper.create_room_as(
-            user1_id,
-            tok=user1_tok,
-            extra_content={
-                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
-            },
+        # Join another room so we don't hit the short-circuit and return early if they
+        # have no room membership
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
         )
 
-        # Create an arbitrarily typed room
-        foo_room_id = self.helper.create_room_as(
-            user1_id,
-            tok=user1_tok,
-            extra_content={
-                "creation_content": {
-                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
-                }
-            },
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_events",
+            )
         )
-
-        after_rooms_token = self.event_sources.get_current_token()
-
-        # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="local_current_membership",
+                keyvalues={
+                    "room_id": room_id1,
+                    "user_id": user1_id,
+                },
+                desc="state reset user in local_current_membership",
+            )
+        )
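+        # A delta row with a `NULL` `event_id` (and `prev_event_id` pointing at the
+        # old join) is how a piece of current state being removed shows up in
+        # `current_state_delta_stream`.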
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
             )
         )
 
-        # Try finding only normal rooms
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]),
-                after_rooms_token,
-            )
+        # Manually bust the cache since we're just manually messing with the database
+        # and not causing an actual state reset.
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, dummy_state_pos.stream
         )
 
-        self.assertEqual(filtered_room_map.keys(), {room_id})
+        after_reset_token = self.event_sources.get_current_token()
 
-        # Try finding only spaces
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]),
-                after_rooms_token,
-            )
+        # The function under test
+        room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=before_reset_token,
+            to_token=after_reset_token,
         )
 
-        self.assertEqual(filtered_room_map.keys(), {space_room_id})
-
-        # Try finding normal rooms and spaces
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    room_types=[None, RoomTypes.SPACE]
-                ),
-                after_rooms_token,
-            )
+        # Room1 should show up because it was `newly_left` via state reset during the from/to range
+        self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+        # It should be pointing to no event because we were removed from the room
+        # without a corresponding leave event
+        self.assertEqual(
+            room_id_results[room_id1].event_id,
+            None,
         )
-
-        self.assertEqual(filtered_room_map.keys(), {room_id, space_room_id})
-
-        # Try finding an arbitrary room type
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    room_types=["org.matrix.foobarbaz"]
-                ),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(filtered_room_map.keys(), {foo_room_id})
-
-    def test_filter_not_room_types(self) -> None:
-        """
-        Test `filter.not_room_types` for different room types
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        # Create a normal room (no room type)
-        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
-
-        # Create a space room
-        space_room_id = self.helper.create_room_as(
-            user1_id,
-            tok=user1_tok,
-            extra_content={
-                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
-            },
-        )
-
-        # Create an arbitrarily typed room
-        foo_room_id = self.helper.create_room_as(
-            user1_id,
-            tok=user1_tok,
-            extra_content={
-                "creation_content": {
-                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
-                }
-            },
-        )
-
-        after_rooms_token = self.event_sources.get_current_token()
-
-        # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
-            )
-        )
-
-        # Try finding *NOT* normal rooms
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(filtered_room_map.keys(), {space_room_id, foo_room_id})
-
-        # Try finding *NOT* spaces
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    not_room_types=[RoomTypes.SPACE]
-                ),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(filtered_room_map.keys(), {room_id, foo_room_id})
-
-        # Try finding *NOT* normal rooms or spaces
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    not_room_types=[None, RoomTypes.SPACE]
-                ),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(filtered_room_map.keys(), {foo_room_id})
-
-        # Test how it behaves when we have both `room_types` and `not_room_types`.
-        # `not_room_types` should win.
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    room_types=[None], not_room_types=[None]
-                ),
-                after_rooms_token,
-            )
-        )
-
-        # Nothing matches because nothing is both a normal room and not a normal room
-        self.assertEqual(filtered_room_map.keys(), set())
-
-        # Test how it behaves when we have both `room_types` and `not_room_types`.
-        # `not_room_types` should win.
-        filtered_room_map = self.get_success(
-            self.sliding_sync_handler.filter_rooms(
-                UserID.from_string(user1_id),
-                sync_room_map,
-                SlidingSyncConfig.SlidingSyncList.Filters(
-                    room_types=[None, RoomTypes.SPACE], not_room_types=[None]
-                ),
-                after_rooms_token,
-            )
-        )
-
-        self.assertEqual(filtered_room_map.keys(), {space_room_id})
+        # State reset caused us to leave the room and there is no corresponding leave event
+        self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+        # We should *NOT* be `newly_joined` because we joined before the token range
+        self.assertTrue(room_id1 not in newly_joined)
+        # We should be `newly_left` because we were removed via state reset during the from/to range
+        self.assertTrue(room_id1 in newly_left)
 
 
 class SortRoomsTestCase(HomeserverTestCase):
@@ -2816,6 +3007,32 @@ class SortRoomsTestCase(HomeserverTestCase):
         self.store = self.hs.get_datastores().main
         self.event_sources = hs.get_event_sources()
 
+    def _get_sync_room_ids_for_user(
+        self,
+        user: UserID,
+        to_token: StreamToken,
+        from_token: Optional[StreamToken],
+    ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]:
+        """
+        Get the rooms the user should be syncing with
+        """
+        room_membership_for_user_map, newly_joined, newly_left = self.get_success(
+            self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+                user=user,
+                from_token=from_token,
+                to_token=to_token,
+            )
+        )
+        filtered_sync_room_map = self.get_success(
+            self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync(
+                user=user,
+                room_membership_for_user_map=room_membership_for_user_map,
+                newly_left_room_ids=newly_left,
+            )
+        )
+
+        return filtered_sync_room_map, newly_joined, newly_left
+
     def test_sort_activity_basic(self) -> None:
         """
         Rooms with newer activity are sorted first.
@@ -2835,24 +3052,22 @@ class SortRoomsTestCase(HomeserverTestCase):
         after_rooms_token = self.event_sources.get_current_token()
 
         # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=None,
-                to_token=after_rooms_token,
-            )
+        sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=None,
+            to_token=after_rooms_token,
         )
 
         # Sort the rooms (what we're testing)
-        sorted_room_info = self.get_success(
-            self.sliding_sync_handler.sort_rooms(
+        sorted_sync_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.sort_rooms(
                 sync_room_map=sync_room_map,
                 to_token=after_rooms_token,
             )
         )
 
         self.assertEqual(
-            [room_id for room_id, _ in sorted_room_info],
+            [room_membership.room_id for room_membership in sorted_sync_rooms],
             [room_id2, room_id1],
         )
 
@@ -2918,24 +3133,22 @@ class SortRoomsTestCase(HomeserverTestCase):
         self.helper.send(room_id3, "activity in room3", tok=user2_tok)
 
         # Get the rooms the user should be syncing with
-        sync_room_map = self.get_success(
-            self.sliding_sync_handler.get_sync_room_ids_for_user(
-                UserID.from_string(user1_id),
-                from_token=before_rooms_token,
-                to_token=after_rooms_token,
-            )
+        sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=before_rooms_token,
+            to_token=after_rooms_token,
         )
 
         # Sort the rooms (what we're testing)
-        sorted_room_info = self.get_success(
-            self.sliding_sync_handler.sort_rooms(
+        sorted_sync_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.sort_rooms(
                 sync_room_map=sync_room_map,
                 to_token=after_rooms_token,
             )
         )
 
         self.assertEqual(
-            [room_id for room_id, _ in sorted_room_info],
+            [room_membership.room_id for room_membership in sorted_sync_rooms],
             [room_id2, room_id1, room_id3],
             "Corresponding map to disambiguate the opaque room IDs: "
             + str(
@@ -2946,3 +3159,747 @@ class SortRoomsTestCase(HomeserverTestCase):
                 }
             ),
         )
+
+    def test_default_bump_event_types(self) -> None:
+        """
+        Test that we only consider the *latest* event in the room when sorting (not
+        `bump_event_types`).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+        message_response = self.helper.send(room_id1, "message in room1", tok=user1_tok)
+        room_id2 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+        self.helper.send(room_id2, "message in room2", tok=user1_tok)
+
+        # Send a reaction in room1 which isn't in `DEFAULT_BUMP_EVENT_TYPES` but we only
+        # care about sorting by the *latest* event in the room.
+        self.helper.send_event(
+            room_id1,
+            type=EventTypes.Reaction,
+            content={
+                "m.relates_to": {
+                    "event_id": message_response["event_id"],
+                    "key": "👍",
+                    "rel_type": "m.annotation",
+                }
+            },
+            tok=user1_tok,
+        )
+
+        after_rooms_token = self.event_sources.get_current_token()
+
+        # Get the rooms the user should be syncing with
+        sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user(
+            UserID.from_string(user1_id),
+            from_token=None,
+            to_token=after_rooms_token,
+        )
+
+        # Sort the rooms (what we're testing)
+        sorted_sync_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.sort_rooms(
+                sync_room_map=sync_room_map,
+                to_token=after_rooms_token,
+            )
+        )
+
+        self.assertEqual(
+            [room_membership.room_id for room_membership in sorted_sync_rooms],
+            # room1 sorts before room2 because it has the latest event (the reaction).
+            # We only care about the *latest* event in the room.
+            [room_id1, room_id2],
+        )
+
+
+@attr.s(slots=True, auto_attribs=True, frozen=True)
+class RequiredStateChangesTestParameters:
+    previous_required_state_map: Dict[str, Set[str]]
+    request_required_state_map: Dict[str, Set[str]]
+    state_deltas: StateMap[str]
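+    # Both `expected_*` fields are the expected return value of
+    # `_required_state_changes(...)`: the changed required state map to persist
+    # (or `None` if there is nothing new to persist) and the `StateFilter` of
+    # extra current state to fetch, evaluated once with the given `state_deltas`
+    # and once with no state deltas at all.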
+    expected_with_state_deltas: Tuple[
+        Optional[Mapping[str, AbstractSet[str]]], StateFilter
+    ]
+    expected_without_state_deltas: Tuple[
+        Optional[Mapping[str, AbstractSet[str]]], StateFilter
+    ]
+
+
+class RequiredStateChangesTestCase(unittest.TestCase):
+    """Test cases for `_required_state_changes`"""
+
+    @parameterized.expand(
+        [
+            (
+                "simple_no_change",
+                """Test no change to required state""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"state_key"}},
+                    request_required_state_map={"type1": {"state_key"}},
+                    state_deltas={("type1", "state_key"): "$event_id"},
+                    # No changes
+                    expected_with_state_deltas=(None, StateFilter.none()),
+                    expected_without_state_deltas=(None, StateFilter.none()),
+                ),
+            ),
+            (
+                "simple_add_type",
+                """Test adding a type to the config""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"state_key"}},
+                    request_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've added a type so we should persist the changed required state
+                        # config.
+                        {"type1": {"state_key"}, "type2": {"state_key"}},
+                        # We should see the new type added
+                        StateFilter.from_types([("type2", "state_key")]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key"}, "type2": {"state_key"}},
+                        StateFilter.from_types([("type2", "state_key")]),
+                    ),
+                ),
+            ),
+            (
+                "simple_add_type_from_nothing",
+                """Test adding a type to the config when previously requesting nothing""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={},
+                    request_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've added a type so we should persist the changed required state
+                        # config.
+                        {"type1": {"state_key"}, "type2": {"state_key"}},
+                        # We should see the new types added
+                        StateFilter.from_types(
+                            [("type1", "state_key"), ("type2", "state_key")]
+                        ),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key"}, "type2": {"state_key"}},
+                        StateFilter.from_types(
+                            [("type1", "state_key"), ("type2", "state_key")]
+                        ),
+                    ),
+                ),
+            ),
+            (
+                "simple_add_state_key",
+                """Test adding a state key to the config""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type": {"state_key1"}},
+                    request_required_state_map={"type": {"state_key1", "state_key2"}},
+                    state_deltas={("type", "state_key2"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've added a key so we should persist the changed required state
+                        # config.
+                        {"type": {"state_key1", "state_key2"}},
+                        # We should see the new state_keys added
+                        StateFilter.from_types([("type", "state_key2")]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type": {"state_key1", "state_key2"}},
+                        StateFilter.from_types([("type", "state_key2")]),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_type",
+                """
+                Test that removing a type from the config when there is a matching
+                state delta causes the persisted required state config to change.
+
+                Test that removing a type from the config when there are no matching
+                state deltas does *not* cause the persisted required state config to
+                change.
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    request_required_state_map={"type1": {"state_key"}},
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type2` since there's been a change to that state,
+                        # (persist the change to required state). That way next time,
+                        # they request `type2`, we see that we haven't sent it before
+                        # and send the new state. (we should still keep track that we've
+                        # sent `type1` before).
+                        {"type1": {"state_key"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type2` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type2` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_type_to_nothing",
+                """
+                Test removing a type from the config and no longer requesting any state
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    request_required_state_map={},
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type2` since there's been a change to that state,
+                        # (persist the change to required state). That way next time,
+                        # they request `type2`, we see that we haven't sent it before
+                        # and send the new state. (we should still keep track that we've
+                        # sent `type1` before).
+                        {"type1": {"state_key"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type2` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type2` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_state_key",
+                """
+                Test removing a state_key from the config
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type": {"state_key1", "state_key2"}},
+                    request_required_state_map={"type": {"state_key1"}},
+                    state_deltas={("type", "state_key2"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `(type, state_key2)` since there's been a change
+                        # to that state (persist the change to required state).
+                        # That way next time, they request `(type, state_key2)`, we see
+                        # that we haven't sent it before and send the new state. (we
+                        # should still keep track that we've sent `(type, state_key1)`
+                        # before).
+                        {"type": {"state_key1"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `(type, state_key2)` is no longer requested but since that
+                        # state hasn't changed, nothing should change (we should still
+                        # keep track that we've sent `(type, state_key1)` and `(type,
+                        # state_key2)` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcards_add",
+                """
+                Test that adding a wildcard type causes the persisted required state
+                config to change and that we request everything.
+
+                If an event type wildcard has been added or removed, we don't try to do
+                anything fancy and instead always update the effective room required
+                state config to match the request.
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"state_key2"}},
+                    request_required_state_map={
+                        "type1": {"state_key2"},
+                        StateValues.WILDCARD: {"state_key"},
+                    },
+                    state_deltas={
+                        ("other_type", "state_key"): "$event_id",
+                    },
+                    # We've added a wildcard, so we persist the change and request everything
+                    expected_with_state_deltas=(
+                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
+                        StateFilter.all(),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
+                        StateFilter.all(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcards_remove",
+                """
+                Test that removing a wildcard type causes the persisted required state
+                config to change and that we request nothing.
+
+                If an event type wildcard has been added or removed, we don't try to do
+                anything fancy and instead always update the effective room required
+                state config to match the request.
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key2"},
+                        StateValues.WILDCARD: {"state_key"},
+                    },
+                    request_required_state_map={"type1": {"state_key2"}},
+                    state_deltas={
+                        ("other_type", "state_key"): "$event_id",
+                    },
+                    # We've removed a type wildcard, so we persist the change but don't request anything
+                    expected_with_state_deltas=(
+                        {"type1": {"state_key2"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key2"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_wildcards_add",
+                """Test adding a wildcard state_key""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"state_key"}},
+                    request_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {StateValues.WILDCARD},
+                    },
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    # We've added a wildcard state_key, so we persist the change and
+                    # request all of the state for that type
+                    expected_with_state_deltas=(
+                        {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
+                        StateFilter.from_types([("type2", None)]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
+                        StateFilter.from_types([("type2", None)]),
+                    ),
+                ),
+            ),
+            (
+                "state_key_wildcards_remove",
+                """Test removing a wildcard state_key""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {StateValues.WILDCARD},
+                    },
+                    request_required_state_map={"type1": {"state_key"}},
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    # We've removed a state_key wildcard, so we persist the change and
+                    # request nothing
+                    expected_with_state_deltas=(
+                        {"type1": {"state_key"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    # We've removed a state_key wildcard but there have been no matching
+                    # state changes, so no changes needed, just persist the
+                    # `request_required_state_map` as-is.
+                    expected_without_state_deltas=(
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_remove_some",
+                """
+                Test that removing state keys works when only some of the state keys
+                have changed
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key1", "state_key2", "state_key3"}
+                    },
+                    request_required_state_map={"type1": {"state_key1"}},
+                    state_deltas={("type1", "state_key3"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've removed some state keys from the type, but only state_key3 was
+                        # changed so only that one should be removed.
+                        {"type1": {"state_key1", "state_key2"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # No changes needed, just persist the
+                        # `request_required_state_map` as-is
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_me_add",
+                """
+                Test that adding state keys works when using "$ME"
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={},
+                    request_required_state_map={"type1": {StateValues.ME}},
+                    state_deltas={("type1", "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've added a type so we should persist the changed required state
+                        # config.
+                        {"type1": {StateValues.ME}},
+                        # We should see the new state_keys added
+                        StateFilter.from_types([("type1", "@user:test")]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {StateValues.ME}},
+                        StateFilter.from_types([("type1", "@user:test")]),
+                    ),
+                ),
+            ),
+            (
+                "state_key_me_remove",
+                """
+                Test that removing state keys works when using "$ME"
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {StateValues.ME}},
+                    request_required_state_map={},
+                    state_deltas={("type1", "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type1` since there's been a change to that state,
+                        # (persist the change to required state). That way next time,
+                        # they request `type1`, we see that we haven't sent it before
+                        # and send the new state. (if we were tracking that we sent any
+                        # other state, we should still keep track that).
+                        {},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type1` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type1` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_user_id_add",
+                """
+                Test that adding state keys works when using your own user ID
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={},
+                    request_required_state_map={"type1": {"@user:test"}},
+                    state_deltas={("type1", "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # We've added a type so we should persist the changed required state
+                        # config.
+                        {"type1": {"@user:test"}},
+                        # We should see the new state_keys added
+                        StateFilter.from_types([("type1", "@user:test")]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"@user:test"}},
+                        StateFilter.from_types([("type1", "@user:test")]),
+                    ),
+                ),
+            ),
+            (
+                "state_key_me_remove",
+                """
+                Test that removing state keys works when using your own user ID
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"@user:test"}},
+                    request_required_state_map={},
+                    state_deltas={("type1", "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type1` since there's been a change to that state
+                        # (persist the change to required state). That way, next time
+                        # they request `type1`, we see that we haven't sent it before
+                        # and send the new state. (If we were tracking that we sent any
+                        # other state, we should still keep track of that.)
+                        {},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type1` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type1` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_lazy_add",
+                """
+                Test that adding state keys works when using "$LAZY"
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={},
+                    request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+                    state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # If a "$LAZY" has been added or removed we always update the
+                        # required state to what was requested for simplicity.
+                        {EventTypes.Member: {StateValues.LAZY}},
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        {EventTypes.Member: {StateValues.LAZY}},
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_lazy_remove",
+                """
+                Test that removing state keys works when using "$LAZY"
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+                    request_required_state_map={},
+                    state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # If a "$LAZY" has been added or removed we always update the
+                        # required state to what was requested for simplicity.
+                        {},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `EventTypes.Member` is no longer requested but since that
+                        # state hasn't changed, nothing should change (we should still
+                        # keep track that we've sent `EventTypes.Member` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcard_with_state_key_wildcard_to_explicit_state_keys",
+                """
+                Test switching from a wildcard ("*", "*") to explicit state keys
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD}
+                    },
+                    request_required_state_map={
+                        StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"}
+                    },
+                    state_deltas={("type1", "state_key1"): "$event_id"},
+                    # If we were previously fetching everything ("*", "*"), always update the effective
+                    # room required state config to match the request. And since we were previously
+                    # already fetching everything, we don't have to fetch anything now that they've
+                    # narrowed.
+                    expected_with_state_deltas=(
+                        {
+                            StateValues.WILDCARD: {
+                                "state_key1",
+                                "state_key2",
+                                "state_key3",
+                            }
+                        },
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        {
+                            StateValues.WILDCARD: {
+                                "state_key1",
+                                "state_key2",
+                                "state_key3",
+                            }
+                        },
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcard_with_explicit_state_keys_to_wildcard_state_key",
+                """
+                Test switching from explicit to wildcard state keys ("*", "*")
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"}
+                    },
+                    request_required_state_map={
+                        StateValues.WILDCARD: {StateValues.WILDCARD}
+                    },
+                    state_deltas={("type1", "state_key1"): "$event_id"},
+                    # We've added a wildcard, so we persist the change and request everything
+                    expected_with_state_deltas=(
+                        {StateValues.WILDCARD: {StateValues.WILDCARD}},
+                        StateFilter.all(),
+                    ),
+                    expected_without_state_deltas=(
+                        {StateValues.WILDCARD: {StateValues.WILDCARD}},
+                        StateFilter.all(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_wildcard_to_explicit_state_keys",
+                """Test switching from a wildcard to explicit state keys with a concrete type""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {StateValues.WILDCARD}},
+                    request_required_state_map={
+                        "type1": {"state_key1", "state_key2", "state_key3"}
+                    },
+                    state_deltas={("type1", "state_key1"): "$event_id"},
+                    # If a state_key wildcard has been added or removed, we always
+                    # update the effective room required state config to match the
+                    # request. And since we were previously already fetching
+                    # everything, we don't have to fetch anything now that they've
+                    # narrowed.
+                    expected_with_state_deltas=(
+                        {
+                            "type1": {
+                                "state_key1",
+                                "state_key2",
+                                "state_key3",
+                            }
+                        },
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        {
+                            "type1": {
+                                "state_key1",
+                                "state_key2",
+                                "state_key3",
+                            }
+                        },
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "state_key_wildcard_to_explicit_state_keys",
+                """Test switching from a wildcard to explicit state keys with a concrete type""",
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key1", "state_key2", "state_key3"}
+                    },
+                    request_required_state_map={"type1": {StateValues.WILDCARD}},
+                    state_deltas={("type1", "state_key1"): "$event_id"},
+                    # If a state_key wildcard has been added or removed, we always
+                    # update the effective room required state config to match the
+                    # request. And we need to request all of the state for that type
+                    # because we previously only sent down a few keys.
+                    expected_with_state_deltas=(
+                        {"type1": {StateValues.WILDCARD}},
+                        StateFilter.from_types([("type1", None)]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {StateValues.WILDCARD}},
+                        StateFilter.from_types([("type1", None)]),
+                    ),
+                ),
+            ),
+        ]
+    )
+    def test_xxx(
+        self,
+        _test_label: str,
+        _test_description: str,
+        test_parameters: RequiredStateChangesTestParameters,
+    ) -> None:
+        # Without `state_deltas`
+        changed_required_state_map, added_state_filter = _required_state_changes(
+            user_id="@user:test",
+            previous_room_config=RoomSyncConfig(
+                timeline_limit=0,
+                required_state_map=test_parameters.previous_required_state_map,
+            ),
+            room_sync_config=RoomSyncConfig(
+                timeline_limit=0,
+                required_state_map=test_parameters.request_required_state_map,
+            ),
+            state_deltas={},
+        )
+
+        self.assertEqual(
+            changed_required_state_map,
+            test_parameters.expected_without_state_deltas[0],
+            "changed_required_state_map does not match (without state_deltas)",
+        )
+        self.assertEqual(
+            added_state_filter,
+            test_parameters.expected_without_state_deltas[1],
+            "added_state_filter does not match (without state_deltas)",
+        )
+
+        # With `state_deltas`
+        changed_required_state_map, added_state_filter = _required_state_changes(
+            user_id="@user:test",
+            previous_room_config=RoomSyncConfig(
+                timeline_limit=0,
+                required_state_map=test_parameters.previous_required_state_map,
+            ),
+            room_sync_config=RoomSyncConfig(
+                timeline_limit=0,
+                required_state_map=test_parameters.request_required_state_map,
+            ),
+            state_deltas=test_parameters.state_deltas,
+        )
+
+        self.assertEqual(
+            changed_required_state_map,
+            test_parameters.expected_with_state_deltas[0],
+            "changed_required_state_map does not match (with state_deltas)",
+        )
+        self.assertEqual(
+            added_state_filter,
+            test_parameters.expected_with_state_deltas[1],
+            "added_state_filter does not match (with state_deltas)",
+        )
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 674dd4fb54..d7bbc68037 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -210,8 +210,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             )
 
         # Blow away caches (supported room versions can only change due to a restart).
-        self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
         self.store.get_rooms_for_user.invalidate_all()
+        self.store._get_rooms_for_local_user_where_membership_is_inner.invalidate_all()
         self.store._get_event_cache.clear()
         self.store._event_ref.clear()
 
@@ -843,7 +843,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
         ) -> List[EventBase]:
             return list(pdus)
 
-        self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch  # type: ignore[assignment]
+        self.client._check_sigs_and_hash_for_pulled_events_and_fetch = (  # type: ignore[method-assign]
+            _check_sigs_and_hash_for_pulled_events_and_fetch  # type: ignore[assignment]
+        )
 
         prev_events = self.get_success(self.store.get_prev_events_for_room(room_id))
 
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index 8e8621e348..ffcbf4b3ca 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -93,9 +93,7 @@ class SrvResolverTestCase(unittest.TestCase):
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
         servers: List[Server]
-        servers = yield defer.ensureDeferred(
-            resolver.resolve_service(service_name)
-        )  # type: ignore[assignment]
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))  # type: ignore[assignment]
 
         dns_client_mock.lookupService.assert_called_once_with(service_name)
 
@@ -122,9 +120,7 @@ class SrvResolverTestCase(unittest.TestCase):
         )
 
         servers: List[Server]
-        servers = yield defer.ensureDeferred(
-            resolver.resolve_service(service_name)
-        )  # type: ignore[assignment]
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))  # type: ignore[assignment]
 
         self.assertFalse(dns_client_mock.lookupService.called)
 
@@ -157,9 +153,7 @@ class SrvResolverTestCase(unittest.TestCase):
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
         servers: List[Server]
-        servers = yield defer.ensureDeferred(
-            resolver.resolve_service(service_name)
-        )  # type: ignore[assignment]
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))  # type: ignore[assignment]
 
         self.assertEqual(len(servers), 0)
         self.assertEqual(len(cache), 0)
diff --git a/tests/http/test_client.py b/tests/http/test_client.py
index 721917f957..ac6470ebbd 100644
--- a/tests/http/test_client.py
+++ b/tests/http/test_client.py
@@ -49,8 +49,11 @@ from tests.unittest import TestCase
 
 
 class ReadMultipartResponseTests(TestCase):
-    data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_"
-    data2 = b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
+    multipart_response_data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_"
+    multipart_response_data2 = (
+        b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
+    )
+    multipart_response_data_cased = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\ncOntEnt-type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-tyPe: text/plain\r\nconTent-dispOsition: inline; filename=test_upload\r\n\r\nfile_"
 
     redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
 
@@ -103,8 +106,31 @@ class ReadMultipartResponseTests(TestCase):
         result, deferred, protocol = self._build_multipart_response(249, 250)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
-        protocol.dataReceived(self.data2)
+        protocol.dataReceived(self.multipart_response_data1)
+        protocol.dataReceived(self.multipart_response_data2)
+        # Close the connection.
+        protocol.connectionLost(Failure(ResponseDone()))
+
+        multipart_response: MultipartResponse = deferred.result  # type: ignore[assignment]
+
+        self.assertEqual(multipart_response.json, b"{}")
+        self.assertEqual(result.getvalue(), b"file_to_stream")
+        self.assertEqual(multipart_response.length, len(b"file_to_stream"))
+        self.assertEqual(multipart_response.content_type, b"text/plain")
+        self.assertEqual(
+            multipart_response.disposition, b"inline; filename=test_upload"
+        )
+
+    def test_parse_file_lowercase_headers(self) -> None:
+        """
+        Check that a multipart response containing a file is properly parsed
+        into the json/file parts, and that the json and file are properly
+        captured even when the HTTP headers use non-standard casing
+        """
+        result, deferred, protocol = self._build_multipart_response(249, 250)
+
+        # Start sending data.
+        protocol.dataReceived(self.multipart_response_data_cased)
+        protocol.dataReceived(self.multipart_response_data2)
         # Close the connection.
         protocol.connectionLost(Failure(ResponseDone()))
 
@@ -143,7 +169,7 @@ class ReadMultipartResponseTests(TestCase):
         result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
 
         self.assertEqual(result.getvalue(), b"file_")
         self._assert_error(deferred, protocol)
@@ -154,11 +180,11 @@ class ReadMultipartResponseTests(TestCase):
         result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
         self._assert_error(deferred, protocol)
 
         # More data might have come in.
-        protocol.dataReceived(self.data2)
+        protocol.dataReceived(self.multipart_response_data2)
 
         self.assertEqual(result.getvalue(), b"file_")
         self._assert_error(deferred, protocol)
@@ -172,7 +198,7 @@ class ReadMultipartResponseTests(TestCase):
         self.assertFalse(deferred.called)
 
         # Start sending data.
-        protocol.dataReceived(self.data1)
+        protocol.dataReceived(self.multipart_response_data1)
         self._assert_error(deferred, protocol)
         self._cleanup_error(deferred)
 
@@ -181,7 +207,9 @@ class ReadMultipartResponseTests(TestCase):
 
 
 class ReadBodyWithMaxSizeTests(TestCase):
-    def _build_response(self, length: Union[int, str] = UNKNOWN_LENGTH) -> Tuple[
+    def _build_response(
+        self, length: Union[int, str] = UNKNOWN_LENGTH
+    ) -> Tuple[
         BytesIO,
         "Deferred[int]",
         _DiscardBodyWithMaxSizeProtocol,
diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py
index e2f033fdae..6588695e37 100644
--- a/tests/http/test_matrixfederationclient.py
+++ b/tests/http/test_matrixfederationclient.py
@@ -17,6 +17,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
+import io
 from typing import Any, Dict, Generator
 from unittest.mock import ANY, Mock, create_autospec
 
@@ -32,7 +33,9 @@ from twisted.web.http import HTTPChannel
 from twisted.web.http_headers import Headers
 
 from synapse.api.errors import HttpResponseException, RequestSendFailed
+from synapse.api.ratelimiting import Ratelimiter
 from synapse.config._base import ConfigError
+from synapse.config.ratelimiting import RatelimitSettings
 from synapse.http.matrixfederationclient import (
     ByteParser,
     MatrixFederationHttpClient,
@@ -337,6 +340,81 @@ class FederationClientTests(HomeserverTestCase):
         r = self.successResultOf(d)
         self.assertEqual(r.code, 200)
 
+    def test_authed_media_redirect_response(self) -> None:
+        """
+        Validate that, when following a `Location` redirect, the
+        maximum size is _not_ set to the initial response `Content-Length` and
+        the media file can be downloaded.
+        """
+        limiter = Ratelimiter(
+            store=self.hs.get_datastores().main,
+            clock=self.clock,
+            cfg=RatelimitSettings(key="", per_second=0.17, burst_count=1048576),
+        )
+
+        output_stream = io.BytesIO()
+
+        d = defer.ensureDeferred(
+            self.cl.federation_get_file(
+                "testserv:8008", "path", output_stream, limiter, "127.0.0.1", 10000
+            )
+        )
+
+        self.pump()
+
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, factory, _timeout, _bindAddress) = clients[0]
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 8008)
+
+        # complete the connection and wire it up to a fake transport
+        protocol = factory.buildProtocol(None)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        # Deferred does not have a result
+        self.assertNoResult(d)
+
+        redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: http://testserv:8008/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
+        protocol.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Server: Fake\r\n"
+            b"Content-Length: %i\r\n"
+            b"Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a\r\n\r\n"
+            % (len(redirect_data))
+        )
+        protocol.dataReceived(redirect_data)
+
+        # Still no result, not followed the redirect yet
+        self.assertNoResult(d)
+
+        # Now send the response returned by the server at `Location`
+        clients = self.reactor.tcpClients
+        (host, port, factory, _timeout, _bindAddress) = clients[1]
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 8008)
+        protocol = factory.buildProtocol(None)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        # make sure the length is longer than the initial response
+        data = b"Hello world!" * 30
+        protocol.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Server: Fake\r\n"
+            b"Content-Length: %i\r\n"
+            b"Content-Type: text/plain\r\n"
+            b"\r\n"
+            b"%s\r\n"
+            b"\r\n" % (len(data), data)
+        )
+
+        # We should get a successful response
+        length, _, _ = self.successResultOf(d)
+        self.assertEqual(length, len(data))
+        self.assertEqual(output_stream.getvalue(), data)
+
     @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"])
     def test_timeout_reading_body(self, method_name: str) -> None:
         """
@@ -817,21 +895,23 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
         )
 
         # Fake `remoteserv:8008` responding to requests
-        mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
-            FakeResponse(
-                code=200,
-                body=b'{"foo": "bar"}',
-                headers=Headers(
-                    {
-                        "Content-Type": ["application/json"],
-                        "Connection": ["close, X-Foo, X-Bar"],
-                        # Should be removed because it's defined in the `Connection` header
-                        "X-Foo": ["foo"],
-                        "X-Bar": ["bar"],
-                        # Should be removed because it's a hop-by-hop header
-                        "Proxy-Authorization": "abcdef",
-                    }
-                ),
+        mock_agent_on_federation_sender.request.side_effect = (
+            lambda *args, **kwargs: defer.succeed(
+                FakeResponse(
+                    code=200,
+                    body=b'{"foo": "bar"}',
+                    headers=Headers(
+                        {
+                            "Content-Type": ["application/json"],
+                            "Connection": ["close, X-Foo, X-Bar"],
+                            # Should be removed because it's defined in the `Connection` header
+                            "X-Foo": ["foo"],
+                            "X-Bar": ["bar"],
+                            # Should be removed because it's a hop-by-hop header
+                            "Proxy-Authorization": "abcdef",
+                        }
+                    ),
+                )
             )
         )
 
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py
index 18af2735fe..db39ecf244 100644
--- a/tests/http/test_servlet.py
+++ b/tests/http/test_servlet.py
@@ -76,7 +76,7 @@ class TestServletUtils(unittest.TestCase):
 
         # Invalid UTF-8.
         with self.assertRaises(SynapseError):
-            parse_json_value_from_request(make_request(b"\xFF\x00"))
+            parse_json_value_from_request(make_request(b"\xff\x00"))
 
         # Invalid JSON.
         with self.assertRaises(SynapseError):
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 024086b775..e50ff5fa78 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -18,7 +18,6 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-import itertools
 import os
 import shutil
 import tempfile
@@ -227,19 +226,15 @@ test_images = [
     empty_file,
     SVG,
 ]
-urls = [
-    "_matrix/media/r0/thumbnail",
-    "_matrix/client/unstable/org.matrix.msc3916/media/thumbnail",
-]
+input_values = [(x,) for x in test_images]
 
 
-@parameterized_class(("test_image", "url"), itertools.product(test_images, urls))
+@parameterized_class(("test_image",), input_values)
 class MediaRepoTests(unittest.HomeserverTestCase):
     servlets = [media.register_servlets]
     test_image: ClassVar[TestImage]
     hijack_auth = True
     user_id = "@test:user"
-    url: ClassVar[str]
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.fetches: List[
@@ -266,7 +261,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             """A mock for MatrixFederationHttpClient.get_file."""
 
             def write_to(
-                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]
+                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]],
             ) -> Tuple[int, Dict[bytes, List[bytes]]]:
                 data, response = r
                 output_stream.write(data)
@@ -304,7 +299,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             "config": {"directory": self.storage_path},
         }
         config["media_storage_providers"] = [provider_config]
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
 
         hs = self.setup_test_homeserver(config=config, federation_http_client=client)
 
@@ -509,7 +503,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         params = "?width=32&height=32&method=scale"
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -537,7 +531,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -573,7 +567,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         params = "?width=32&height=32&method=" + method
         channel = self.make_request(
             "GET",
-            f"/{self.url}/{self.media_id}{params}",
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -608,7 +602,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_UNKNOWN",
-                    "error": f"Cannot find any thumbnails for the requested media ('/{self.url}/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
                 },
             )
         else:
@@ -618,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_NOT_FOUND",
-                    "error": f"Not found '/{self.url}/example.com/12345'",
+                    "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'",
                 },
             )
 
@@ -1063,13 +1057,15 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
         )
         assert channel.code == 200
 
+    @override_config({"remote_media_download_burst_count": "87M"})
     @patch(
         "synapse.http.matrixfederationclient.read_body_with_max_size",
         read_body_with_max_size_30MiB,
     )
-    def test_download_ratelimit_max_size_sub(self) -> None:
+    def test_download_ratelimit_unknown_length(self) -> None:
         """
-        Test that if no content-length is provided, the default max size is applied instead
+        Test that if no content-length is provided, the ratelimit will still be
+        applied after the download, once the length is known
         """
 
         # mock out actually sending the request
@@ -1083,19 +1079,48 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
 
         self.client._send_request = _send_request  # type: ignore
 
-        # ten requests should go through using the max size (500MB/50MB)
-        for i in range(10):
-            channel2 = self.make_request(
+        # 3 requests should go through (note the 3rd one would technically violate the
+        # ratelimit, but it is only applied *after* the download - the next request will
+        # be ratelimited)
+        for i in range(3):
+            channel = self.make_request(
                 "GET",
                 f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
                 shorthand=False,
             )
-            assert channel2.code == 200
+            assert channel.code == 200
 
-        # eleventh will hit ratelimit
-        channel3 = self.make_request(
+        # 4th will hit ratelimit
+        channel2 = self.make_request(
             "GET",
             "/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
             shorthand=False,
         )
-        assert channel3.code == 429
+        assert channel2.code == 429
+
+    @override_config({"max_upload_size": "29M"})
+    @patch(
+        "synapse.http.matrixfederationclient.read_body_with_max_size",
+        read_body_with_max_size_30MiB,
+    )
+    def test_max_download_respected(self) -> None:
+        """
+        Test that the max download size is enforced - note that the max download size
+        is determined by `max_upload_size`
+        """
+
+        # mock out actually sending the request
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
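+            # 30 MiB, which is larger than the 29M `max_upload_size` configured above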
+            resp.length = 31457280
+            resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
+
+        channel = self.make_request(
+            "GET", "/_matrix/media/v3/download/remote.org/abcd", shorthand=False
+        )
+        assert channel.code == 502
+        assert channel.json_body["errcode"] == "M_TOO_LARGE"
diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py
index 29d4580697..b8265ff9ca 100644
--- a/tests/media/test_oembed.py
+++ b/tests/media/test_oembed.py
@@ -20,6 +20,7 @@
 #
 
 import json
+from typing import Any
 
 from parameterized import parameterized
 
@@ -52,6 +53,7 @@ class OEmbedTests(HomeserverTestCase):
 
     def test_version(self) -> None:
         """Accept versions that are similar to 1.0 as a string or int (or missing)."""
+        version: Any
         for version in ("1.0", 1.0, 1):
             result = self.parse_response({"version": version})
             # An empty Open Graph response is an error, ensure the URL is included.
@@ -69,6 +71,7 @@ class OEmbedTests(HomeserverTestCase):
 
     def test_cache_age(self) -> None:
         """Ensure a cache-age is parsed properly."""
+        cache_age: Any
         # Correct-ish cache ages are allowed.
         for cache_age in ("1", 1.0, 1):
             result = self.parse_response({"cache_age": cache_age})
diff --git a/tests/module_api/test_account_data_manager.py b/tests/module_api/test_account_data_manager.py
index fd87eaffd0..1a1d5609b2 100644
--- a/tests/module_api/test_account_data_manager.py
+++ b/tests/module_api/test_account_data_manager.py
@@ -164,6 +164,8 @@ class ModuleApiTestCase(HomeserverTestCase):
             # noinspection PyTypeChecker
             self.get_success_or_raise(
                 self._module_api.account_data_manager.put_global(
-                    self.user_id, "test.data", 42  # type: ignore[arg-type]
+                    self.user_id,
+                    "test.data",
+                    42,  # type: ignore[arg-type]
                 )
             )
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index e0aab1c046..4fafb71897 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -44,6 +44,7 @@ from tests.unittest import HomeserverTestCase
 @attr.s(auto_attribs=True)
 class _User:
     "Helper wrapper for user ID and access token"
+
     id: str
     token: str
 
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 420fbea998..3898532acf 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -149,6 +149,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
         content: JsonMapping,
         *,
         related_events: Optional[JsonDict] = None,
+        msc4210: bool = False,
     ) -> PushRuleEvaluator:
         event = FrozenEvent(
             {
@@ -174,6 +175,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             related_event_match_enabled=True,
             room_version_feature_flags=event.room_version.msc3931_push_features,
             msc3931_enabled=True,
+            msc4210_enabled=msc4210,
         )
 
     def test_display_name(self) -> None:
@@ -452,6 +454,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             {"value": False},
             "incorrect values should not match",
         )
+        value: Any
         for value in ("foobaz", 1, 1.1, None, [], {}):
             self._assert_not_matches(
                 condition,
@@ -492,6 +495,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             {"value": None},
             "exact value should match",
         )
+        value: Any
         for value in ("foobaz", True, False, 1, 1.1, [], {}):
             self._assert_not_matches(
                 condition,
diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py
index a56f1e2d5d..1afe523d02 100644
--- a/tests/replication/storage/test_events.py
+++ b/tests/replication/storage/test_events.py
@@ -30,19 +30,16 @@ from synapse.api.constants import ReceiptTypes
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase, make_event_from_dict
 from synapse.events.snapshot import EventContext
-from synapse.handlers.room import RoomEventSource
 from synapse.server import HomeServer
 from synapse.storage.databases.main.event_push_actions import (
     NotifCounts,
     RoomNotifCounts,
 )
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser
+from synapse.storage.roommember import RoomsForUser
 from synapse.types import PersistedEventPosition
 from synapse.util import Clock
 
-from tests.server import FakeTransport
-
 from ._base import BaseWorkerStoreTestCase
 
 USER_ID = "@feeling:test"
@@ -221,125 +218,6 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
             ),
         )
 
-    def test_get_rooms_for_user_with_stream_ordering(self) -> None:
-        """Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated
-        by rows in the events stream
-        """
-        self.persist(type="m.room.create", key="", creator=USER_ID)
-        self.persist(type="m.room.member", key=USER_ID, membership="join")
-        self.replicate()
-        self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set())
-
-        j2 = self.persist(
-            type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
-        )
-        assert j2.internal_metadata.instance_name is not None
-        assert j2.internal_metadata.stream_ordering is not None
-        self.replicate()
-
-        expected_pos = PersistedEventPosition(
-            j2.internal_metadata.instance_name, j2.internal_metadata.stream_ordering
-        )
-        self.check(
-            "get_rooms_for_user_with_stream_ordering",
-            (USER_ID_2,),
-            {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)},
-        )
-
-    def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(
-        self,
-    ) -> None:
-        """Check that current_state invalidation happens correctly with multiple events
-        in the persistence batch.
-
-        This test attempts to reproduce a race condition between the event persistence
-        loop and a worker-based Sync handler.
-
-        The problem occurred when the master persisted several events in one batch. It
-        only updates the current_state at the end of each batch, so the obvious thing
-        to do is then to issue a current_state_delta stream update corresponding to the
-        last stream_id in the batch.
-
-        However, that raises the possibility that a worker will see the replication
-        notification for a join event before the current_state caches are invalidated.
-
-        The test involves:
-         * creating a join and a message event for a user, and persisting them in the
-           same batch
-
-         * controlling the replication stream so that updates are sent gradually
-
-         * between each bunch of replication updates, check that we see a consistent
-           snapshot of the state.
-        """
-        self.persist(type="m.room.create", key="", creator=USER_ID)
-        self.persist(type="m.room.member", key=USER_ID, membership="join")
-        self.replicate()
-        self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set())
-
-        # limit the replication rate
-        repl_transport = self._server_transport
-        assert isinstance(repl_transport, FakeTransport)
-        repl_transport.autoflush = False
-
-        # build the join and message events and persist them in the same batch.
-        logger.info("----- build test events ------")
-        j2, j2ctx = self.build_event(
-            type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
-        )
-        msg, msgctx = self.build_event()
-        self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)]))
-        self.replicate()
-        assert j2.internal_metadata.instance_name is not None
-        assert j2.internal_metadata.stream_ordering is not None
-
-        event_source = RoomEventSource(self.hs)
-        event_source.store = self.worker_store
-        current_token = event_source.get_current_key()
-
-        # gradually stream out the replication
-        while repl_transport.buffer:
-            logger.info("------ flush ------")
-            repl_transport.flush(30)
-            self.pump(0)
-
-            prev_token = current_token
-            current_token = event_source.get_current_key()
-
-            # attempt to replicate the behaviour of the sync handler.
-            #
-            # First, we get a list of the rooms we are joined to
-            joined_rooms = self.get_success(
-                self.worker_store.get_rooms_for_user_with_stream_ordering(USER_ID_2)
-            )
-
-            # Then, we get a list of the events since the last sync
-            membership_changes = self.get_success(
-                self.worker_store.get_membership_changes_for_user(
-                    USER_ID_2, prev_token, current_token
-                )
-            )
-
-            logger.info(
-                "%s->%s: joined_rooms=%r membership_changes=%r",
-                prev_token,
-                current_token,
-                joined_rooms,
-                membership_changes,
-            )
-
-            # the membership change is only any use to us if the room is in the
-            # joined_rooms list.
-            if membership_changes:
-                expected_pos = PersistedEventPosition(
-                    j2.internal_metadata.instance_name,
-                    j2.internal_metadata.stream_ordering,
-                )
-                self.assertEqual(
-                    joined_rooms,
-                    {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)},
-                )
-
     event_id = 0
 
     def persist(self, backfill: bool = False, **kwargs: Any) -> EventBase:
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index 2a1e42bbc8..150caeeee2 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -531,9 +531,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
 
         # simulate a change in server config after a server restart.
         new_display_name = "new display name"
-        self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = (
-            new_display_name
-        )
+        self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = new_display_name
         self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()
 
         self.make_request(
@@ -577,9 +575,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
 
         # simulate a change in server config after a server restart.
         new_avatar_url = "test/new-url"
-        self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = (
-            new_avatar_url
-        )
+        self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = new_avatar_url
         self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()
 
         self.make_request(
@@ -692,9 +688,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
 
         # simulate a change in server config after a server restart.
         new_avatar_url = "test/new-url"
-        self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = (
-            new_avatar_url
-        )
+        self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = new_avatar_url
         self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()
 
         self.make_request(
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 16bb4349f5..6982c7291a 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -21,9 +21,11 @@
 
 import hashlib
 import hmac
+import json
 import os
 import urllib.parse
 from binascii import unhexlify
+from http import HTTPStatus
 from typing import Dict, List, Optional
 from unittest.mock import AsyncMock, Mock, patch
 
@@ -33,7 +35,7 @@ from twisted.test.proto_helpers import MemoryReactor
 from twisted.web.resource import Resource
 
 import synapse.rest.admin
-from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes
+from synapse.api.constants import ApprovalNoticeMedium, EventTypes, LoginType, UserTypes
 from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
 from synapse.api.room_versions import RoomVersions
 from synapse.media.filepath import MediaFilePaths
@@ -5089,3 +5091,316 @@ class UserSuspensionTestCase(unittest.HomeserverTestCase):
 
         res5 = self.get_success(self.store.get_user_suspended_status(self.bad_user))
         self.assertEqual(True, res5)
+
+
+class UserRedactionTestCase(unittest.HomeserverTestCase):
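+    """
+    Tests for the admin API that redacts all of a given user's events
+    (`POST /_synapse/admin/v1/user/<user_id>/redact`).
+    """
+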
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        admin.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.admin = self.register_user("thomas", "pass", True)
+        self.admin_tok = self.login("thomas", "pass")
+
+        self.bad_user = self.register_user("teresa", "pass")
+        self.bad_user_tok = self.login("teresa", "pass")
+
+        self.store = hs.get_datastores().main
+
+        self.spam_checker = hs.get_module_api_callbacks().spam_checker
+
+        # create rooms - room versions 11+ store the `redacts` key in content while
+        # earlier ones don't, so we use a mix of room versions
+        self.rm1 = self.helper.create_room_as(
+            self.admin, tok=self.admin_tok, room_version="7"
+        )
+        self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok)
+        self.rm3 = self.helper.create_room_as(
+            self.admin, tok=self.admin_tok, room_version="11"
+        )
+
+    def test_redact_messages_all_rooms(self) -> None:
+        """
+        Test that a request to redact events in all rooms the user is a member of is successful
+        """
+
+        # join rooms, send some messages
+        originals = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
+            originals.append(join["event_id"])
+            for i in range(15):
+                event = {"body": f"hello{i}", "msgtype": "m.text"}
+                res = self.helper.send_event(
+                    rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200
+                )
+                originals.append(res["event_id"])
+
+        # redact all events in all rooms
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": []},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel.code, 200)
+
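+        # check that each of the original events now has a corresponding
+        # redaction in its room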
+        matched = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            filter = json.dumps({"types": [EventTypes.Redaction]})
+            channel = self.make_request(
+                "GET",
+                f"rooms/{rm}/messages?filter={filter}&limit=50",
+                access_token=self.admin_tok,
+            )
+            self.assertEqual(channel.code, 200)
+
+            for event in channel.json_body["chunk"]:
+                for event_id in originals:
+                    if (
+                        event["type"] == "m.room.redaction"
+                        and event["redacts"] == event_id
+                    ):
+                        matched.append(event_id)
+        self.assertEqual(len(matched), len(originals))
+
+    def test_redact_messages_specific_rooms(self) -> None:
+        """
+        Test that a request to redact events in specified rooms the user is a member of is successful
+        """
+
+        originals = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
+            originals.append(join["event_id"])
+            for i in range(15):
+                event = {"body": f"hello{i}", "msgtype": "m.text"}
+                res = self.helper.send_event(
+                    rm, "m.room.message", event, tok=self.bad_user_tok
+                )
+                originals.append(res["event_id"])
+
+        # redact messages in rooms 1 and 3
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm1, self.rm3]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel.code, 200)
+
+        # messages in requested rooms are redacted
+        for rm in [self.rm1, self.rm3]:
+            filter = json.dumps({"types": [EventTypes.Redaction]})
+            channel = self.make_request(
+                "GET",
+                f"rooms/{rm}/messages?filter={filter}&limit=50",
+                access_token=self.admin_tok,
+            )
+            self.assertEqual(channel.code, 200)
+
+            matches = []
+            for event in channel.json_body["chunk"]:
+                for event_id in originals:
+                    if (
+                        event["type"] == "m.room.redaction"
+                        and event["redacts"] == event_id
+                    ):
+                        matches.append((event_id, event))
+            # we redacted 16 events (the join event and 15 messages)
+            self.assertEqual(len(matches), 16)
+
+        channel = self.make_request(
+            "GET", f"rooms/{self.rm2}/messages?limit=50", access_token=self.admin_tok
+        )
+        self.assertEqual(channel.code, 200)
+
+        # messages in the remaining room are not redacted
+        for event in channel.json_body["chunk"]:
+            if event["type"] == "m.room.redaction":
+                self.fail("found redaction in room 2")
+
+    def test_redact_status(self) -> None:
+        rm2_originals = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
+            if rm == self.rm2:
+                rm2_originals.append(join["event_id"])
+            for i in range(5):
+                event = {"body": f"hello{i}", "msgtype": "m.text"}
+                res = self.helper.send_event(
+                    rm, "m.room.message", event, tok=self.bad_user_tok
+                )
+                if rm == self.rm2:
+                    rm2_originals.append(res["event_id"])
+
+        # redact messages in rooms 1 and 3
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm1, self.rm3]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel.code, 200)
+        id = channel.json_body.get("redact_id")
+
+        channel2 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel2.code, 200)
+        self.assertEqual(channel2.json_body.get("status"), "complete")
+        self.assertEqual(channel2.json_body.get("failed_redactions"), {})
+
+        # mock that will cause persisting the redaction events to fail
+        async def check_event_for_spam(event: str) -> str:
+            return "spam"
+
+        self.spam_checker.check_event_for_spam = check_event_for_spam  # type: ignore
+
+        channel3 = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm2]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel3.code, 200)
+        id = channel3.json_body.get("redact_id")
+
+        channel4 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel4.code, 200)
+        self.assertEqual(channel4.json_body.get("status"), "complete")
+        failed_redactions = channel4.json_body.get("failed_redactions")
+        assert failed_redactions is not None
+        matched = []
+        for original in rm2_originals:
+            if failed_redactions.get(original) is not None:
+                matched.append(original)
+        self.assertEqual(len(matched), len(rm2_originals))
+
+    def test_admin_redact_works_if_user_kicked_or_banned(self) -> None:
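+        """
+        Test that an admin can still redact a user's events in rooms that the
+        user has since been kicked or banned from.
+        """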
+        originals1 = []
+        originals2 = []
+        for rm in [self.rm1, self.rm2, self.rm3]:
+            join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
+            if rm in [self.rm1, self.rm3]:
+                originals1.append(join["event_id"])
+            else:
+                originals2.append(join["event_id"])
+            for i in range(5):
+                event = {"body": f"hello{i}", "msgtype": "m.text"}
+                res = self.helper.send_event(
+                    rm, "m.room.message", event, tok=self.bad_user_tok
+                )
+                if rm in [self.rm1, self.rm3]:
+                    originals1.append(res["event_id"])
+                else:
+                    originals2.append(res["event_id"])
+
+        # kick user from rooms 1 and 3
+        for r in [self.rm1, self.rm3]:
+            channel = self.make_request(
+                "POST",
+                f"/_matrix/client/r0/rooms/{r}/kick",
+                content={"reason": "being a bummer", "user_id": self.bad_user},
+                access_token=self.admin_tok,
+            )
+            self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+        # redact messages in room 1 and 3
+        channel1 = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": [self.rm1, self.rm3]},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel1.code, 200)
+        id = channel1.json_body.get("redact_id")
+
+        # check that there were no failed redactions in room 1 and 3
+        channel2 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel2.code, 200)
+        self.assertEqual(channel2.json_body.get("status"), "complete")
+        failed_redactions = channel2.json_body.get("failed_redactions")
+        self.assertEqual(failed_redactions, {})
+
+        # double check
+        for rm in [self.rm1, self.rm3]:
+            filter = json.dumps({"types": [EventTypes.Redaction]})
+            channel3 = self.make_request(
+                "GET",
+                f"rooms/{rm}/messages?filter={filter}&limit=50",
+                access_token=self.admin_tok,
+            )
+            self.assertEqual(channel3.code, 200)
+
+            matches = []
+            for event in channel3.json_body["chunk"]:
+                for event_id in originals1:
+                    if (
+                        event["type"] == "m.room.redaction"
+                        and event["redacts"] == event_id
+                    ):
+                        matches.append((event_id, event))
+            # we redacted 6 events in each room: the user's join event and their 5 messages
+            self.assertEqual(len(matches), 6)
+
+        # ban user from room 2
+        channel4 = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{self.rm2}/ban",
+            content={"reason": "being a bummer", "user_id": self.bad_user},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel4.code, HTTPStatus.OK, channel4.result)
+
+        # make a request to redact all of the user's messages
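+        # (an empty "rooms" list asks the endpoint to redact the user's events in
+        # every room they are or were a member of)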
+        channel5 = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/user/{self.bad_user}/redact",
+            content={"rooms": []},
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel5.code, 200)
+        id2 = channel5.json_body.get("redact_id")
+
+        # check that there were no failed redactions in room 2
+        channel6 = self.make_request(
+            "GET",
+            f"/_synapse/admin/v1/user/redact_status/{id2}",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel6.code, 200)
+        self.assertEqual(channel6.json_body.get("status"), "complete")
+        failed_redactions = channel6.json_body.get("failed_redactions")
+        self.assertEqual(failed_redactions, {})
+
+        # double check messages in room 2 were redacted
+        filter = json.dumps({"types": [EventTypes.Redaction]})
+        channel7 = self.make_request(
+            "GET",
+            f"rooms/{self.rm2}/messages?filter={filter}&limit=50",
+            access_token=self.admin_tok,
+        )
+        self.assertEqual(channel7.code, 200)
+
+        matches = []
+        for event in channel7.json_body["chunk"]:
+            for event_id in originals2:
+                if event["type"] == "m.room.redaction" and event["redacts"] == event_id:
+                    matches.append((event_id, event))
+        # we redacted all 6 of the user's events in room 2: the join event and the 5 messages
+        self.assertEqual(len(matches), 6)
diff --git a/tests/rest/client/sliding_sync/__init__.py b/tests/rest/client/sliding_sync/__init__.py
new file mode 100644
index 0000000000..c4de9d53e2
--- /dev/null
+++ b/tests/rest/client/sliding_sync/__init__.py
@@ -0,0 +1,13 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py
new file mode 100644
index 0000000000..5b819103c2
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_connection_tracking.py
@@ -0,0 +1,397 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized, parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
+    """
+    Test connection tracking in the Sliding Sync API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_rooms_required_state_incremental_sync_LIVE(self) -> None:
+        """Test that we only get state updates in incremental sync for rooms
+        we've already seen (LIVE).
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Name, ""],
+                    ],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.RoomHistoryVisibility, "")],
+            },
+            exact=True,
+        )
+
+        # Send a state event
+        self.helper.send_state(
+            room_id1, EventTypes.Name, body={"name": "foo"}, tok=user2_tok
+        )
+
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self.assertNotIn("initial", response_body["rooms"][room_id1])
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Name, "")],
+            },
+            exact=True,
+        )
+
+    @parameterized.expand([(False,), (True,)])
+    def test_rooms_timeline_incremental_sync_PREVIOUSLY(self, limited: bool) -> None:
+        """
+        Test getting room data where we have previously sent down the room, but
+        we missed sending down some timeline events previously and so its status
+        is considered PREVIOUSLY.
+
+        There are two versions of this test, one where there are more messages
+        than the timeline limit, and one where there isn't.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        timeline_limit = 5
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": timeline_limit,
+                }
+            },
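+            # An explicit connection ID so the server can track, per connection, which
+            # rooms and events it has already sent down.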
+            "conn_id": "conn_id",
+        }
+
+        # The first room gets sent down the initial sync
+        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+
+        # We now send down some events in room1 (depending on the test param).
+        expected_events = []  # The set of events in the timeline
+        if limited:
+            for _ in range(10):
+                resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
+                expected_events.append(resp["event_id"])
+        else:
+            resp = self.helper.send(room_id1, "msg1", tok=user1_tok)
+            expected_events.append(resp["event_id"])
+
+        # A second message happens in the other room, so room1 won't get sent down.
+        self.helper.send(room_id2, "msg", tok=user1_tok)
+
+        # Only the second room gets sent down sync.
+        response_body, from_token = self.do_sync(
+            sync_body, since=initial_from_token, tok=user1_tok
+        )
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
+        )
+
+        # We now send another event to room1, so we should sync all the missing events.
+        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
+        expected_events.append(resp["event_id"])
+
+        # This sync should contain the messages from room1 not yet sent down.
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+        self.assertNotIn("initial", response_body["rooms"][room_id1])
+
+        self.assertEqual(
+            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
+            expected_events[-timeline_limit:],
+        )
+        self.assertEqual(response_body["rooms"][room_id1]["limited"], limited)
+        self.assertEqual(response_body["rooms"][room_id1].get("required_state"), None)
+
+    def test_rooms_required_state_incremental_sync_PREVIOUSLY(self) -> None:
+        """
+        Test getting room data where we have previously sent down the room, but
+        we missed sending down some state previously and so its status is
+        considered PREVIOUSLY.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Name, ""],
+                    ],
+                    "timeline_limit": 0,
+                }
+            },
+            "conn_id": "conn_id",
+        }
+
+        # The first room gets sent down the initial sync
+        response_body, initial_from_token = self.do_sync(sync_body, tok=user1_tok)
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+
+        # We now send down some state in room1
+        resp = self.helper.send_state(
+            room_id1, EventTypes.Name, {"name": "foo"}, tok=user1_tok
+        )
+        name_change_id = resp["event_id"]
+
+        # A second message happens in the other room, so room1 won't get sent down.
+        self.helper.send(room_id2, "msg", tok=user1_tok)
+
+        # Only the second room gets sent down sync.
+        response_body, from_token = self.do_sync(
+            sync_body, since=initial_from_token, tok=user1_tok
+        )
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
+        )
+
+        # We now send another event to room1, so we should sync all the missing state.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # This sync should contain the state changes from room1.
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+        self.assertNotIn("initial", response_body["rooms"][room_id1])
+
+        # We should only see the name change.
+        self.assertEqual(
+            [
+                ev["event_id"]
+                for ev in response_body["rooms"][room_id1]["required_state"]
+            ],
+            [name_change_id],
+        )
+
+    def test_rooms_required_state_incremental_sync_NEVER(self) -> None:
+        """
+        Test getting `required_state` where we have NEVER sent down the room before
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Name, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            },
+        }
+
+        # A message happens in the other room, so room1 won't get sent down.
+        self.helper.send(room_id2, "msg", tok=user1_tok)
+
+        # Only the second room gets sent down sync.
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
+        )
+
+        # We now send another event to room1, so we should send down the full
+        # room.
+        self.helper.send(room_id1, "msg2", tok=user1_tok)
+
+        # This sync should contain the messages from room1 not yet sent down.
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.RoomHistoryVisibility, "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_timeline_incremental_sync_NEVER(self) -> None:
+        """
+        Test getting timeline room data where we have NEVER sent down the room
+        before
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 5,
+                }
+            },
+        }
+
+        expected_events = []
+        for _ in range(4):
+            resp = self.helper.send(room_id1, "msg", tok=user1_tok)
+            expected_events.append(resp["event_id"])
+
+        # A message happens in the other room, so room1 won't get sent down.
+        self.helper.send(room_id2, "msg", tok=user1_tok)
+
+        # Only the second room gets sent down sync.
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id2}, response_body["rooms"]
+        )
+
+        # We now send another event to room1 so it comes down sync
+        resp = self.helper.send(room_id1, "msg2", tok=user1_tok)
+        expected_events.append(resp["event_id"])
+
+        # This sync should contain the messages from room1 not yet sent down.
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertCountEqual(
+            response_body["rooms"].keys(), {room_id1}, response_body["rooms"]
+        )
+
+        self.assertEqual(
+            [ev["event_id"] for ev in response_body["rooms"][room_id1]["timeline"]],
+            expected_events,
+        )
+        self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py
new file mode 100644
index 0000000000..799fbb1856
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_account_data.py
@@ -0,0 +1,1056 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import enum
+import logging
+
+from parameterized import parameterized, parameterized_class
+from typing_extensions import assert_never
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import AccountDataTypes
+from synapse.rest.client import login, room, sendtodevice, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+class TagAction(enum.Enum):
+    ADD = enum.auto()
+    REMOVE = enum.auto()
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase):
+    """Tests for the account_data sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        sendtodevice.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.account_data_handler = hs.get_account_data_handler()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the account_data extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the account_data extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        global_account_data_map = {
+            global_event["type"]: global_event["content"]
+            for global_event in response_body["extensions"]["account_data"].get(
+                "global"
+            )
+        }
+        self.assertIncludes(
+            global_account_data_map.keys(),
+            # Even though we don't have any global account data set, Synapse saves some
+            # default push rules for us.
+            {AccountDataTypes.PUSH_RULES},
+            exact=True,
+        )
+        # Push rules are a giant chunk of JSON data, so we will just assume the value is correct if the key is here.
+        # global_account_data_map[AccountDataTypes.PUSH_RULES]
+
+        # No room account data for this test
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the account_data extension works during an incremental sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # There has been no account data changes since the `from_token` so we shouldn't
+        # see any account data here.
+        global_account_data_map = {
+            global_event["type"]: global_event["content"]
+            for global_event in response_body["extensions"]["account_data"].get(
+                "global"
+            )
+        }
+        self.assertIncludes(
+            global_account_data_map.keys(),
+            set(),
+            exact=True,
+        )
+
+        # No room account data for this test
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_global_account_data_initial_sync(self) -> None:
+        """
+        On initial sync, we should return all of the user's global account data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Update the global account data
+        self.get_success(
+            self.account_data_handler.add_account_data_for_user(
+                user_id=user1_id,
+                account_data_type="org.matrix.foobarbaz",
+                content={"foo": "bar"},
+            )
+        )
+
+        # Make an initial Sliding Sync request with the account_data extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # It should show us all of the global account data
+        global_account_data_map = {
+            global_event["type"]: global_event["content"]
+            for global_event in response_body["extensions"]["account_data"].get(
+                "global"
+            )
+        }
+        self.assertIncludes(
+            global_account_data_map.keys(),
+            {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"},
+            exact=True,
+        )
+        # Push rules are a giant chunk of JSON data, so we will just assume the value is correct if the key is here.
+        # global_account_data_map[AccountDataTypes.PUSH_RULES]
+        self.assertEqual(
+            global_account_data_map["org.matrix.foobarbaz"], {"foo": "bar"}
+        )
+
+        # No room account data for this test
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_global_account_data_incremental_sync(self) -> None:
+        """
+        On incremental sync, we should only return account data that has changed since
+        the `from_token`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Add some global account data
+        self.get_success(
+            self.account_data_handler.add_account_data_for_user(
+                user_id=user1_id,
+                account_data_type="org.matrix.foobarbaz",
+                content={"foo": "bar"},
+            )
+        )
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Add some other global account data
+        self.get_success(
+            self.account_data_handler.add_account_data_for_user(
+                user_id=user1_id,
+                account_data_type="org.matrix.doodardaz",
+                content={"doo": "dar"},
+            )
+        )
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        global_account_data_map = {
+            global_event["type"]: global_event["content"]
+            for global_event in response_body["extensions"]["account_data"].get(
+                "global"
+            )
+        }
+        self.assertIncludes(
+            global_account_data_map.keys(),
+            # We should only see the new global account data that happened after the `from_token`
+            {"org.matrix.doodardaz"},
+            exact=True,
+        )
+        self.assertEqual(
+            global_account_data_map["org.matrix.doodardaz"], {"doo": "dar"}
+        )
+
+        # No room account data for this test
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_room_account_data_initial_sync(self) -> None:
+        """
+        On initial sync, we return all account data for a given room but only for
+        rooms that we request and that are being returned in the Sliding Sync response.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room and add some room account data
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Create another room with some room account data
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Make an initial Sliding Sync request with the account_data extension enabled
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            },
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
+        # Even though we requested room2, we only expect room1 to show up because that's
+        # the only room in the Sliding Sync response (room2 is not one of our room
+        # subscriptions or in a sliding window list).
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        account_data_map = {
+            event["type"]: event["content"]
+            for event in response_body["extensions"]["account_data"]
+            .get("rooms")
+            .get(room_id1)
+        }
+        self.assertIncludes(
+            account_data_map.keys(),
+            {"org.matrix.roorarraz", AccountDataTypes.TAG},
+            exact=True,
+        )
+        self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"})
+        self.assertEqual(
+            account_data_map[AccountDataTypes.TAG], {"tags": {"m.favourite": {}}}
+        )
+
+    @parameterized.expand(
+        [
+            ("add tags", TagAction.ADD),
+            ("remove tags", TagAction.REMOVE),
+        ]
+    )
+    def test_room_account_data_incremental_sync(
+        self, test_description: str, tag_action: TagAction
+    ) -> None:
+        """
+        On incremental sync, we return all account data for a given room but only for
+        rooms that we request and that are being returned in the Sliding Sync response.
+
+        (HaveSentRoomFlag.LIVE)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room and add some room account data
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Create another room with some room account data
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            },
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Add some other room account data
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        if tag_action == TagAction.ADD:
+            # Add another room tag
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+        elif tag_action == TagAction.REMOVE:
+            # Remove the room tag
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.favourite",
+                )
+            )
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.favourite",
+                )
+            )
+        else:
+            assert_never(tag_action)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
+        # Even though we requested room2, we only expect room1 to show up because that's
+        # the only room in the Sliding Sync response (room2 is not one of our room
+        # subscriptions or in a sliding window list).
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        # We should only see the new room account data that happened after the `from_token`
+        account_data_map = {
+            event["type"]: event["content"]
+            for event in response_body["extensions"]["account_data"]
+            .get("rooms")
+            .get(room_id1)
+        }
+        self.assertIncludes(
+            account_data_map.keys(),
+            {"org.matrix.roorarraz2", AccountDataTypes.TAG},
+            exact=True,
+        )
+        self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"})
+        if tag_action == TagAction.ADD:
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {"m.favourite": {}, "m.server_notice": {}}},
+            )
+        elif tag_action == TagAction.REMOVE:
+            # If we previously showed the client that the room has tags, when it no
+            # longer has tags, we need to show them an empty map.
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {}},
+            )
+        else:
+            assert_never(tag_action)
+
+    @parameterized.expand(
+        [
+            ("add tags", TagAction.ADD),
+            ("remove tags", TagAction.REMOVE),
+        ]
+    )
+    def test_room_account_data_incremental_sync_out_of_range_never(
+        self, test_description: str, tag_action: TagAction
+    ) -> None:
+        """Tests that we don't return account data for rooms that are out of
+        range, but then do send all account data once they're in range.
+
+        (initial/HaveSentRoomFlag.NEVER)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room and add some room account data
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Create another room with some room account data
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Now send a message into room1 so that it is at the top of the list
+        self.helper.send(room_id1, body="new event", tok=user1_tok)
+
+        # Make a SS request for only the top room.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            },
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                    "lists": ["main"],
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Only room1 should be in the response since it's the latest room with activity
+        # and our range only includes 1 room.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # Add some other room account data
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        if tag_action == TagAction.ADD:
+            # Add another room tag
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+        elif tag_action == TagAction.REMOVE:
+            # Remove the room tag
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.favourite",
+                )
+            )
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.favourite",
+                )
+            )
+        else:
+            assert_never(tag_action)
+
+        # Move room2 into range.
+        self.helper.send(room_id2, body="new event", tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
+        # We expect to see the account data of room2, as that has the most
+        # recent update.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+        # Since this is the first time we're seeing room2 down sync, we should see all
+        # room account data for it.
+        account_data_map = {
+            event["type"]: event["content"]
+            for event in response_body["extensions"]["account_data"]
+            .get("rooms")
+            .get(room_id2)
+        }
+        expected_account_data_keys = {
+            "org.matrix.roorarraz",
+            "org.matrix.roorarraz2",
+        }
+        if tag_action == TagAction.ADD:
+            expected_account_data_keys.add(AccountDataTypes.TAG)
+        self.assertIncludes(
+            account_data_map.keys(),
+            expected_account_data_keys,
+            exact=True,
+        )
+        self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"})
+        self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"})
+        if tag_action == TagAction.ADD:
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {"m.favourite": {}, "m.server_notice": {}}},
+            )
+        elif tag_action == TagAction.REMOVE:
+            # Since we never told the client about the room tags, we don't need to say
+            # anything if there are no tags now (the client doesn't need an update).
+            self.assertIsNone(
+                account_data_map.get(AccountDataTypes.TAG),
+                account_data_map,
+            )
+        else:
+            assert_never(tag_action)
+
+    @parameterized.expand(
+        [
+            ("add tags", TagAction.ADD),
+            ("remove tags", TagAction.REMOVE),
+        ]
+    )
+    def test_room_account_data_incremental_sync_out_of_range_previously(
+        self, test_description: str, tag_action: TagAction
+    ) -> None:
+        """Tests that we don't return account data for rooms that fall out of
+        range, but then do send all account data that has changed once they're back in range.
+
+        (HaveSentRoomFlag.PREVIOUSLY)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room and add some room account data
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Create another room with some room account data
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz",
+                content={"roo": "rar"},
+            )
+        )
+        # Add a room tag to mark the room as a favourite
+        self.get_success(
+            self.account_data_handler.add_tag_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                tag="m.favourite",
+                content={},
+            )
+        )
+
+        # Make an initial Sliding Sync request for only room1 and room2.
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id2: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Both rooms show up because we have a room subscription for each and they're
+        # requested in the `account_data` extension.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id1, room_id2},
+            exact=True,
+        )
+
+        # Add some other room account data
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id1,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        self.get_success(
+            self.account_data_handler.add_account_data_to_room(
+                user_id=user1_id,
+                room_id=room_id2,
+                account_data_type="org.matrix.roorarraz2",
+                content={"roo": "rar"},
+            )
+        )
+        if tag_action == TagAction.ADD:
+            # Add another room tag
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+            self.get_success(
+                self.account_data_handler.add_tag_to_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.server_notice",
+                    content={},
+                )
+            )
+        elif tag_action == TagAction.REMOVE:
+            # Remove the room tag
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id1,
+                    tag="m.favourite",
+                )
+            )
+            self.get_success(
+                self.account_data_handler.remove_tag_from_room(
+                    user_id=user1_id,
+                    room_id=room_id2,
+                    tag="m.favourite",
+                )
+            )
+        else:
+            assert_never(tag_action)
+
+        # Make an incremental Sliding Sync request for just room1
+        response_body, from_token = self.do_sync(
+            {
+                **sync_body,
+                "room_subscriptions": {
+                    room_id1: {
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    },
+                },
+            },
+            since=from_token,
+            tok=user1_tok,
+        )
+
+        # Only room1 shows up because we only have a room subscription for room1 now.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # Make an incremental Sliding Sync request for just room2 now
+        response_body, from_token = self.do_sync(
+            {
+                **sync_body,
+                "room_subscriptions": {
+                    room_id2: {
+                        "required_state": [],
+                        "timeline_limit": 0,
+                    },
+                },
+            },
+            since=from_token,
+            tok=user1_tok,
+        )
+
+        # Only room2 shows up because we only have a room subscription for room2 now.
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+
+        self.assertIsNotNone(response_body["extensions"]["account_data"].get("global"))
+        # Check for room account data for room2
+        self.assertIncludes(
+            response_body["extensions"]["account_data"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+        # We should see any room account data updates for room2 since the last
+        # time we saw it down sync
+        account_data_map = {
+            event["type"]: event["content"]
+            for event in response_body["extensions"]["account_data"]
+            .get("rooms")
+            .get(room_id2)
+        }
+        self.assertIncludes(
+            account_data_map.keys(),
+            {"org.matrix.roorarraz2", AccountDataTypes.TAG},
+            exact=True,
+        )
+        self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"})
+        if tag_action == TagAction.ADD:
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {"m.favourite": {}, "m.server_notice": {}}},
+            )
+        elif tag_action == TagAction.REMOVE:
+            # If we previously showed the client that the room has tags, when it no
+            # longer has tags, we need to show them an empty map.
+            self.assertEqual(
+                account_data_map[AccountDataTypes.TAG],
+                {"tags": {}},
+            )
+        else:
+            assert_never(tag_action)
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the account_data extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the global account data to trigger new results
+        self.get_success(
+            self.account_data_handler.add_account_data_for_user(
+                user1_id,
+                "org.matrix.foobarbaz",
+                {"foo": "bar"},
+            )
+        )
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the global account data update
+        self.assertIncludes(
+            {
+                global_event["type"]
+                for global_event in channel.json_body["extensions"]["account_data"].get(
+                    "global"
+                )
+            },
+            {"org.matrix.foobarbaz"},
+            exact=True,
+        )
+        self.assertIncludes(
+            channel.json_body["extensions"]["account_data"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives so we timeout. We're also making sure that the default data
+        from the account_data extension doesn't trigger a false-positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "account_data": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we're in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to check
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id,
+            # We choose `StreamKeyType.PRESENCE` because we're testing for account data
+            # and don't want to contaminate the account data results using
+            # `StreamKeyType.ACCOUNT_DATA`.
+            wake_stream_key=StreamKeyType.PRESENCE,
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self.assertIsNotNone(
+            channel.json_body["extensions"]["account_data"].get("global")
+        )
+        self.assertIsNotNone(
+            channel.json_body["extensions"]["account_data"].get("rooms")
+        )
diff --git a/tests/rest/client/sliding_sync/test_extension_e2ee.py b/tests/rest/client/sliding_sync/test_extension_e2ee.py
new file mode 100644
index 0000000000..7ce6592d8f
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_e2ee.py
@@ -0,0 +1,459 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.rest.client import devices, login, room, sync
+from synapse.server import HomeServer
+from synapse.types import JsonDict, StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
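+# NOTE: `@parameterized_class` below runs every test twice: once with
+# `use_new_tables=True` (class suffix `_new`) and once with `use_new_tables=False`
+# (class suffix `_fallback`).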
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
+    """Tests for the e2ee sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        devices.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.e2e_keys_handler = hs.get_e2e_keys_handler()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an initial sync, even if
+        there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the e2ee extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Device list updates are only present for incremental syncs
+        self.assertIsNone(response_body["extensions"]["e2ee"].get("device_lists"))
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # This is always present because of
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the e2ee extension works during an incremental sync, even
+        if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the e2ee extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Device list shows up for incremental syncs
+        self.assertEqual(
+            response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
+            [],
+        )
+        self.assertEqual(
+            response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+            [],
+        )
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # Note that "signed_curve25519" is always returned in key count responses
+                # regardless of whether we uploaded any keys for it. This is necessary until
+                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                #
+                # Also related:
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            response_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        test_device_id = "TESTDEVICE"
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we're in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the device lists to trigger new results
+        # Have user3 update their device list
+        device_update_channel = self.make_request(
+            "PUT",
+            f"/devices/{test_device_id}",
+            {
+                "display_name": "New Device Name",
+            },
+            access_token=user3_tok,
+        )
+        self.assertEqual(
+            device_update_channel.code, 200, device_update_channel.json_body
+        )
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the device list update
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]
+            .get("device_lists", {})
+            .get("changed"),
+            [user3_id],
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+            [],
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data from the E2EE extension doesn't trigger a false positive for new data (see
+        `device_one_time_keys_count.signed_curve25519`).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we're in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to check
+        # `SlidingSyncResult.__bool__` for new results.
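+        # (`StreamKeyType.ACCOUNT_DATA` is unrelated to the e2ee extension, so waking
+        # up on it shouldn't be mistaken for new e2ee data.)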
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Device lists are present for incremental syncs but empty because no device changes
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]
+            .get("device_lists", {})
+            .get("changed"),
+            [],
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+            [],
+        )
+
+        # Both of these should be present even when empty
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"],
+            {
+                # Note that "signed_curve25519" is always returned in key count responses
+                # regardless of whether we uploaded any keys for it. This is necessary until
+                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                #
+                # Also related:
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0
+            },
+        )
+        self.assertEqual(
+            channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"],
+            [],
+        )
+
+    def test_device_lists(self) -> None:
+        """
+        Test that device list updates are included in the response
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        test_device_id = "TESTDEVICE"
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass", device_id=test_device_id)
+
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Have user3 update their device list
+        channel = self.make_request(
+            "PUT",
+            f"/devices/{test_device_id}",
+            {
+                "display_name": "New Device Name",
+            },
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # User4 leaves the room
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # Make an incremental Sliding Sync request with the e2ee extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Device list updates show up
+        self.assertEqual(
+            response_body["extensions"]["e2ee"].get("device_lists", {}).get("changed"),
+            [user3_id],
+        )
+        self.assertEqual(
+            response_body["extensions"]["e2ee"].get("device_lists", {}).get("left"),
+            [user4_id],
+        )
+
+    def test_device_one_time_keys_count(self) -> None:
+        """
+        Test that `device_one_time_keys_count` are included in the response
+        """
+        test_device_id = "TESTDEVICE"
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass", device_id=test_device_id)
+
+        # Upload one time keys for the user/device
+        keys: JsonDict = {
+            "alg1:k1": "key1",
+            "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
+            "alg2:k3": {"key": "key3"},
+        }
+        upload_keys_response = self.get_success(
+            self.e2e_keys_handler.upload_keys_for_user(
+                user1_id, test_device_id, {"one_time_keys": keys}
+            )
+        )
+        self.assertDictEqual(
+            upload_keys_response,
+            {
+                "one_time_key_counts": {
+                    "alg1": 1,
+                    "alg2": 2,
+                    # Note that "signed_curve25519" is always returned in key count responses
+                    # regardless of whether we uploaded any keys for it. This is necessary until
+                    # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                    #
+                    # Also related:
+                    # https://github.com/element-hq/element-android/issues/3725 and
+                    # https://github.com/matrix-org/synapse/issues/10456
+                    "signed_curve25519": 0,
+                }
+            },
+        )
+
+        # Make a Sliding Sync request with the e2ee extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Check for those one time key counts
+        self.assertEqual(
+            response_body["extensions"]["e2ee"].get("device_one_time_keys_count"),
+            {
+                "alg1": 1,
+                "alg2": 2,
+                # Note that "signed_curve25519" is always returned in key count responses
+                # regardless of whether we uploaded any keys for it. This is necessary until
+                # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
+                #
+                # Also related:
+                # https://github.com/element-hq/element-android/issues/3725 and
+                # https://github.com/matrix-org/synapse/issues/10456
+                "signed_curve25519": 0,
+            },
+        )
+
+    def test_device_unused_fallback_key_types(self) -> None:
+        """
+        Test that `device_unused_fallback_key_types` are included in the response
+        """
+        test_device_id = "TESTDEVICE"
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass", device_id=test_device_id)
+
+        # We shouldn't have any unused fallback keys yet
+        res = self.get_success(
+            self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
+        )
+        self.assertEqual(res, [])
+
+        # Upload a fallback key for the user/device
+        self.get_success(
+            self.e2e_keys_handler.upload_keys_for_user(
+                user1_id,
+                test_device_id,
+                {"fallback_keys": {"alg1:k1": "fallback_key1"}},
+            )
+        )
+        # We should now have an unused alg1 key
+        fallback_res = self.get_success(
+            self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id)
+        )
+        self.assertEqual(fallback_res, ["alg1"], fallback_res)
+
+        # Make a Sliding Sync request with the e2ee extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "e2ee": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Check for the unused fallback key types
+        self.assertListEqual(
+            response_body["extensions"]["e2ee"].get("device_unused_fallback_key_types"),
+            ["alg1"],
+        )
diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py
new file mode 100644
index 0000000000..6e7700b533
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_receipts.py
@@ -0,0 +1,934 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EduTypes, ReceiptTypes
+from synapse.rest.client import login, receipts, room, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
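+# NOTE: `@parameterized_class` below runs every test twice: once with
+# `use_new_tables=True` (class suffix `_new`) and once with `use_new_tables=False`
+# (class suffix `_fallback`).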
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase):
+    """Tests for the receipts sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+        super().prepare(reactor, clock, hs)
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the receipts extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the receipts extension works during an incremental sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_receipts_initial_sync_with_timeline(self) -> None:
+        """
+        On initial sync, we only return receipts for events in a given room's timeline.
+
+        We also make sure that we only return receipts for rooms that we request and are
+        already being returned in the Sliding Sync response.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+
+        # Create a room
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        room1_event_response1 = self.helper.send(
+            room_id1, body="new event1", tok=user2_tok
+        )
+        room1_event_response2 = self.helper.send(
+            room_id1, body="new event2", tok=user2_tok
+        )
+        # User1 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User3 reads the first event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
+            {},
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User4 privately reads the last event (make sure this doesn't leak to the other users)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ_PRIVATE}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user4_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create another room
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        self.helper.join(room_id2, user3_id, tok=user3_tok)
+        self.helper.join(room_id2, user4_id, tok=user4_tok)
+        room2_event_response1 = self.helper.send(
+            room_id2, body="new event2", tok=user2_tok
+        )
+        # User1 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 reads the last event
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User4 privately reads the last event (make sure this doesn't leak to the other users)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user4_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make an initial Sliding Sync request with the receipts extension enabled
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    # On initial sync, we only have receipts for events in the timeline
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Only the latest event in the room is in the timeline because the `timeline_limit` is 1
+        self.assertIncludes(
+            {
+                event["event_id"]
+                for event in response_body["rooms"][room_id1].get("timeline", [])
+            },
+            {room1_event_response2["event_id"]},
+            exact=True,
+            message=str(response_body["rooms"][room_id1]),
+        )
+
+        # Even though we requested room2, we only expect room1 to show up because that's
+        # the only room in the Sliding Sync response (room2 is not one of our room
+        # subscriptions or in a sliding window list).
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We can see user1 and user2 read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response2["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id, user2_id},
+            exact=True,
+        )
+        # User1 did not have a private read receipt and we shouldn't leak others'
+        # private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response2["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+
+        # We shouldn't see receipts for event1 since it wasn't in the timeline and this is an initial sync
+        self.assertIsNone(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"].get(
+                room1_event_response1["event_id"]
+            )
+        )
+
+    def test_receipts_incremental_sync(self) -> None:
+        """
+        On incremental sync, we return all receipts in the token range for a given room,
+        but only for rooms that we request and that are being returned in the Sliding
+        Sync response.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        # Create room1
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        room1_event_response1 = self.helper.send(
+            room_id1, body="new event2", tok=user2_tok
+        )
+        # User2 reads the last event (before the `from_token`)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create room2
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        room2_event_response1 = self.helper.send(
+            room_id2, body="new event2", tok=user2_tok
+        )
+        # User1 reads the last event (before the `from_token`)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create room3
+        room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id3, user1_id, tok=user1_tok)
+        self.helper.join(room_id3, user3_id, tok=user3_tok)
+        room3_event_response1 = self.helper.send(
+            room_id3, body="new event", tok=user2_tok
+        )
+
+        # Create room4
+        room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id4, user1_id, tok=user1_tok)
+        self.helper.join(room_id4, user3_id, tok=user3_tok)
+        event_response4 = self.helper.send(room_id4, body="new event", tok=user2_tok)
+        # User1 reads the last event (before the `from_token`)
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id4}/receipt/{ReceiptTypes.READ}/{event_response4['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id3: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id4: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2, room_id3, room_id4],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Add some more read receipts after the `from_token`
+        #
+        # User1 reads room1
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User1 privately reads room2
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id2}/receipt/{ReceiptTypes.READ_PRIVATE}/{room2_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User3 reads room3
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id3}/receipt/{ReceiptTypes.READ}/{room3_event_response1['event_id']}",
+            {},
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # No activity for room4 after the `from_token`
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Even though we requested room2, we only expect rooms to show up if they are
+        # already in the Sliding Sync response. room4 doesn't show up because there is
+        # no activity after the `from_token`.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1, room_id3},
+            exact=True,
+        )
+
+        # Check room1:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user1 has read something in room1 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id1, response_body["rooms"])
+
+        # Check room3:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We only see that user3 has read something in room3 since the `from_token`
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user3_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id3]["content"][
+                room3_event_response1["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+        # No events in the timeline since they were sent before the `from_token`
+        self.assertNotIn(room_id3, response_body["rooms"])
+
+    def test_receipts_incremental_sync_all_live_receipts(self) -> None:
+        """
+        On incremental sync, we return all receipts in the token range for a given room
+        even if they are not in the timeline.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create room1
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    # The timeline will only include event2
+                    "timeline_limit": 1,
+                },
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id1],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        room1_event_response1 = self.helper.send(
+            room_id1, body="new event1", tok=user2_tok
+        )
+        room1_event_response2 = self.helper.send(
+            room_id1, body="new event2", tok=user2_tok
+        )
+
+        # User1 reads event1
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response1['event_id']}",
+            {},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 reads event2
+        channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id1}/receipt/{ReceiptTypes.READ}/{room1_event_response2['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should see room1 because it has receipts in the token range
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["type"],
+            EduTypes.RECEIPT,
+        )
+        # We should see all receipts in the token range regardless of whether the events
+        # are in the timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response1["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+        self.assertIncludes(
+            response_body["extensions"]["receipts"]["rooms"][room_id1]["content"][
+                room1_event_response2["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
+        # Only the latest event in the timeline because the `timeline_limit` is 1
+        self.assertIncludes(
+            {
+                event["event_id"]
+                for event in response_body["rooms"][room_id1].get("timeline", [])
+            },
+            {room1_event_response2["event_id"]},
+            exact=True,
+            message=str(response_body["rooms"][room_id1]),
+        )
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        event_response = self.helper.send(room_id, body="new event", tok=user2_tok)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                    "rooms": [room_id],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the receipts extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we're in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the receipts to trigger new results
+        receipt_channel = self.make_request(
+            "POST",
+            f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
+            {},
+            access_token=user2_tok,
+        )
+        self.assertEqual(receipt_channel.code, 200, receipt_channel.json_body)
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the new receipt
+        self.assertIncludes(
+            channel.json_body.get("extensions", {})
+            .get("receipts", {})
+            .get("rooms", {})
+            .keys(),
+            {room_id},
+            exact=True,
+            message=str(channel.json_body),
+        )
+        self.assertIncludes(
+            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
+                event_response["event_id"]
+            ][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
+        # User1 did not send a private read receipt in this room and we shouldn't leak
+        # others' private read receipts
+        self.assertIncludes(
+            channel.json_body["extensions"]["receipts"]["rooms"][room_id]["content"][
+                event_response["event_id"]
+            ]
+            .get(ReceiptTypes.READ_PRIVATE, {})
+            .keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data from the receipts extension doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we're in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to check
+        # `SlidingSyncResult.__bool__` for new results.
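+        # (`StreamKeyType.ACCOUNT_DATA` is unrelated to the receipts extension, so
+        # waking up on it shouldn't be mistaken for new receipt data.)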
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self.assertIncludes(
+            channel.json_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_receipts_incremental_sync_out_of_range(self) -> None:
+        """Tests that we don't return read receipts for rooms that fall out of
+        range, but then do send all read receipts once they're back in range.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        # Send a message and read receipt into room2
+        event_response = self.helper.send(room_id2, body="new event", tok=user2_tok)
+        room2_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id2, room2_event_id, tok=user1_tok)
+
+        # Now send a message into room1 so that it is at the top of the list
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the top room.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 5,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # The receipt is in room2, but only room1 is returned, so we don't
+        # expect to get the receipt.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+        # Move room2 into range.
+        self.helper.send(room_id2, body="new event", tok=user2_tok)
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We expect to see the read receipt of room2, as that has the most
+        # recent update.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id2]
+        self.assertIncludes(
+            receipt["content"][room2_event_id][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+
+        # Send a message into room1 to bump it to the top, but also send a
+        # receipt in room2
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+        self.helper.send_read_receipt(room_id2, room2_event_id, tok=user2_tok)
+
+        # We don't expect to see the new read receipt.
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+        # But if we send a new message into room2, we expect to get the missing receipts
+        self.helper.send(room_id2, body="new event", tok=user2_tok)
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id2},
+            exact=True,
+        )
+
+        # We should only see the new receipt
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id2]
+        self.assertIncludes(
+            receipt["content"][room2_event_id][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
+
+    def test_return_own_read_receipts(self) -> None:
+        """Test that we always send the user's own read receipts in initial
+        rooms, even if the receipts don't match events in the timeline.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send a message and read receipts into room1
+        event_response = self.helper.send(room_id1, body="new event", tok=user2_tok)
+        room1_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user1_tok)
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok)
+
+        # Now send a message so the above message is not in the timeline.
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the latest message.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should get our own receipt in room1, even though it's not within the
+        # timeline limit.
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # We should only see our read receipt, not the other user's.
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+
+    def test_read_receipts_expanded_timeline(self) -> None:
+        """Test that we get read receipts when we expand the timeline limit (`unstable_expanded_timeline`)."""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send a message and read receipt into room1
+        event_response = self.helper.send(room_id1, body="new event", tok=user2_tok)
+        room1_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok)
+
+        # Now send a message so the above message is not in the timeline.
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the latest message.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # We shouldn't see user2's read receipt, as it's not in the timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+        # Now do another request, using a room subscription with an increased timeline limit
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 2,
+            }
+        }
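+        # Increasing the `timeline_limit` via the room subscription should cause the
+        # server to send an expanded timeline chunk (flagged by
+        # `unstable_expanded_timeline` below), which is what surfaces the older receipt.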
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # Assert that we did actually get an expanded timeline
+        room_response = response_body["rooms"][room_id1]
+        self.assertNotIn("initial", room_response)
+        self.assertEqual(room_response["unstable_expanded_timeline"], True)
+
+        # We should now see user2's read receipt, as it's in the expanded timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # We should see user2's read receipt for the older event.
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py
new file mode 100644
index 0000000000..790abb739d
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_to_device.py
@@ -0,0 +1,295 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+from typing import List
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.rest.client import login, sendtodevice, sync
+from synapse.server import HomeServer
+from synapse.types import JsonDict, StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
+    """Tests for the to-device sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        sync.register_servlets,
+        sendtodevice.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        super().prepare(reactor, clock, hs)
+
+    def _assert_to_device_response(
+        self, response_body: JsonDict, expected_messages: List[JsonDict]
+    ) -> str:
+        """Assert the sliding sync response was successful and has the expected
+        to-device messages.
+
+        Returns the next_batch token from the to-device section.
+        """
+        extensions = response_body["extensions"]
+        to_device = extensions["to_device"]
+        self.assertIsInstance(to_device["next_batch"], str)
+        self.assertEqual(to_device["events"], expected_messages)
+
+        return to_device["next_batch"]
+
+    def test_no_data(self) -> None:
+        """Test that enabling to-device extension works, even if there is
+        no-data
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We expect no to-device messages
+        self._assert_to_device_response(response_body, [])
+
+    def test_data_initial_sync(self) -> None:
+        """Test that we get to-device messages when we don't specify a since
+        token"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass", "d1")
+        user2_id = self.register_user("u2", "pass")
+        user2_tok = self.login(user2_id, "pass", "d2")
+
+        # Send the to-device message
+        test_msg = {"foo": "bar"}
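+        # (In the `/sendToDevice/{eventType}/{txnId}` path below, `m.test` is the
+        # message type and `1234` is a client-chosen transaction ID.)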
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.test/1234",
+            content={"messages": {user1_id: {"d1": test_msg}}},
+            access_token=user2_tok,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self._assert_to_device_response(
+            response_body,
+            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
+        )
+
+    def test_data_incremental_sync(self) -> None:
+        """Test that we get to-device messages over incremental syncs"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass", "d1")
+        user2_id = self.register_user("u2", "pass")
+        user2_tok = self.login(user2_id, "pass", "d2")
+
+        sync_body: JsonDict = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # No to-device messages yet.
+        next_batch = self._assert_to_device_response(response_body, [])
+
+        test_msg = {"foo": "bar"}
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.test/1234",
+            content={"messages": {user1_id: {"d1": test_msg}}},
+            access_token=user2_tok,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                    "since": next_batch,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        next_batch = self._assert_to_device_response(
+            response_body,
+            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
+        )
+
+        # The next sliding sync request should not include the to-device
+        # message.
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                    "since": next_batch,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self._assert_to_device_response(response_body, [])
+
+        # An initial sliding sync request should not include the to-device
+        # message, as it should have been deleted
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self._assert_to_device_response(response_body, [])
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass", "d1")
+        user2_id = self.register_user("u2", "pass")
+        user2_tok = self.login(user2_id, "pass", "d2")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
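+        # (`timeout` is the long-poll timeout in milliseconds and `pos` is the position
+        # token returned by the previous request.)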
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the to-device messages to trigger new results
+        test_msg = {"foo": "bar"}
+        send_to_device_channel = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.test/1234",
+            content={"messages": {user1_id: {"d1": test_msg}}},
+            access_token=user2_tok,
+        )
+        self.assertEqual(
+            send_to_device_channel.code, 200, send_to_device_channel.result
+        )
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self._assert_to_device_response(
+            channel.json_body,
+            [{"content": test_msg, "sender": user2_id, "type": "m.test"}],
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data from the to-device extension doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "to_device": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
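+        # (account data is unrelated to the to-device extension, so this wake-up
+        # shouldn't produce any new results for this request)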
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self._assert_to_device_response(channel.json_body, [])
diff --git a/tests/rest/client/sliding_sync/test_extension_typing.py b/tests/rest/client/sliding_sync/test_extension_typing.py
new file mode 100644
index 0000000000..f87c3c8b17
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extension_typing.py
@@ -0,0 +1,500 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EduTypes
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.types import StreamKeyType
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.server import TimedOutException
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
+    """Tests for the typing notification sliding sync extension"""
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+        super().prepare(reactor, clock, hs)
+
+    def test_no_data_initial_sync(self) -> None:
+        """
+        Test that enabling the typing extension works during an initial sync,
+        even if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Make an initial Sliding Sync request with the typing extension enabled
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_no_data_incremental_sync(self) -> None:
+        """
+        Test that enabling the typing extension works during an incremental sync, even
+        if there is no data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the typing extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+    def test_typing_initial_sync(self) -> None:
+        """
+        On initial sync, we return all typing notifications for rooms that we request
+        and that are being returned in the Sliding Sync response.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+
+        # Create a room
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        # User1 starts typing in room1
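+        # (the `timeout` in the request body is how long, in milliseconds, the typing
+        # notification stays active before it expires)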
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id1}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 starts typing in room1
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id1}/typing/{user2_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create another room
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        self.helper.join(room_id2, user3_id, tok=user3_tok)
+        self.helper.join(room_id2, user4_id, tok=user4_tok)
+        # User1 starts typing in room2
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id2}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User2 starts typing in room2
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id2}/typing/{user2_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Make an initial Sliding Sync request with the typing extension enabled
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            },
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Even though we requested room2, we only expect room1 to show up because that's
+        # the only room in the Sliding Sync response (room2 is not one of our room
+        # subscriptions or in a sliding window list).
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
+            EduTypes.TYPING,
+        )
+        # We can see user1 and user2 typing
+        self.assertIncludes(
+            set(
+                response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user1_id, user2_id},
+            exact=True,
+        )
+
+    def test_typing_incremental_sync(self) -> None:
+        """
+        On incremental sync, we return all typing notifications in the token range for a
+        given room, but only for rooms that we request and that are being returned in the
+        Sliding Sync response.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        # Create room1
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        # User2 starts typing in room1
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id1}/typing/{user2_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user2_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create room2
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        # User1 starts typing in room2 (before the `from_token`)
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id2}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Create room3
+        room_id3 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id3, user1_id, tok=user1_tok)
+        self.helper.join(room_id3, user3_id, tok=user3_tok)
+
+        # Create room4
+        room_id4 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id4, user1_id, tok=user1_tok)
+        self.helper.join(room_id4, user3_id, tok=user3_tok)
+        # User1 starts typing in room4 (before the `from_token`)
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id4}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Advance time so all of the typing notifications time out before we make our
+        # Sliding Sync requests. Even though these are sent before the `from_token`, the
+        # typing code only keeps track of the stream position of the latest typing
+        # notification, so "old" typing notifications that are still "alive" (haven't
+        # timed out) can appear in the response.
+        self.reactor.advance(36)
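+        # (36s is past the 30s `timeout` used for the typing notifications above, so
+        # they have all expired by this point)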
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id3: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                room_id4: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                    "rooms": [room_id1, room_id2, room_id3, room_id4],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Add some more typing notifications after the `from_token`
+        #
+        # User1 starts typing in room1
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id1}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User1 starts typing in room2
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id2}/typing/{user1_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # User3 starts typing in room3
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id3}/typing/{user3_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user3_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        # No activity for room4 after the `from_token`
+
+        # Make an incremental Sliding Sync request with the typing extension enabled
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Even though we requested room2, we only expect rooms to show up if they are
+        # already in the Sliding Sync response. room4 doesn't show up because there is
+        # no activity after the `from_token`.
+        self.assertIncludes(
+            response_body["extensions"]["typing"].get("rooms").keys(),
+            {room_id1, room_id3},
+            exact=True,
+        )
+
+        # Check room1:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["typing"]["rooms"][room_id1]["type"],
+            EduTypes.TYPING,
+        )
+        # We only see that user1 is typing in room1 since the `from_token`
+        self.assertIncludes(
+            set(
+                response_body["extensions"]["typing"]["rooms"][room_id1]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user1_id},
+            exact=True,
+        )
+
+        # Check room3:
+        #
+        # Sanity check that it's the correct ephemeral event type
+        self.assertEqual(
+            response_body["extensions"]["typing"]["rooms"][room_id3]["type"],
+            EduTypes.TYPING,
+        )
+        # We only see that user3 is typing in room3 since the `from_token`
+        self.assertIncludes(
+            set(
+                response_body["extensions"]["typing"]["rooms"][room_id3]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user3_id},
+            exact=True,
+        )
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {},
+            "room_subscriptions": {
+                room_id: {
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            },
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                    "rooms": [room_id],
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make an incremental Sliding Sync request with the typing extension enabled
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the typing status to trigger new results
+        typing_channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/typing/{user2_id}",
+            b'{"typing": true, "timeout": 30000}',
+            access_token=user2_tok,
+        )
+        self.assertEqual(typing_channel.code, 200, typing_channel.json_body)
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We should see the new typing notification
+        self.assertIncludes(
+            channel.json_body.get("extensions", {})
+            .get("typing", {})
+            .get("rooms", {})
+            .keys(),
+            {room_id},
+            exact=True,
+            message=str(channel.json_body),
+        )
+        self.assertIncludes(
+            set(
+                channel.json_body["extensions"]["typing"]["rooms"][room_id]["content"][
+                    "user_ids"
+                ]
+            ),
+            {user2_id},
+            exact=True,
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive but
+        no data ever arrives, so we time out. We're also making sure that the default
+        data from the typing extension doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        sync_body = {
+            "lists": {},
+            "extensions": {
+                "typing": {
+                    "enabled": True,
+                }
+            },
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to test
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        self.assertIncludes(
+            channel.json_body["extensions"]["typing"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py
new file mode 100644
index 0000000000..30230e5c4b
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_extensions.py
@@ -0,0 +1,306 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+from typing import Literal
+
+from parameterized import parameterized, parameterized_class
+from typing_extensions import assert_never
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import ReceiptTypes
+from synapse.rest.client import login, receipts, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncExtensionsTestCase(SlidingSyncBase):
+    """
+    Test general extensions behavior in the Sliding Sync API. Each extension has its
+    own suite of tests in its own file as well.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.account_data_handler = hs.get_account_data_handler()
+
+        super().prepare(reactor, clock, hs)
+
+    # Any extensions that use `lists`/`rooms` should be tested here
+    @parameterized.expand([("account_data",), ("receipts",), ("typing",)])
+    def test_extensions_lists_rooms_relevant_rooms(
+        self,
+        extension_name: Literal["account_data", "receipts", "typing"],
+    ) -> None:
+        """
+        With various extensions, test out requesting different variations of
+        `lists`/`rooms`.
+
+        Stresses `SlidingSyncHandler.find_relevant_room_ids_for_extension(...)`
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create some rooms
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id4 = self.helper.create_room_as(user1_id, tok=user1_tok)
+        room_id5 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_to_human_name_map = {
+            room_id1: "room1",
+            room_id2: "room2",
+            room_id3: "room3",
+            room_id4: "room4",
+            room_id5: "room5",
+        }
+
+        for room_id in room_id_to_human_name_map.keys():
+            if extension_name == "account_data":
+                # Add some account data to each room
+                self.get_success(
+                    self.account_data_handler.add_account_data_to_room(
+                        user_id=user1_id,
+                        room_id=room_id,
+                        account_data_type="org.matrix.roorarraz",
+                        content={"roo": "rar"},
+                    )
+                )
+            elif extension_name == "receipts":
+                event_response = self.helper.send(
+                    room_id, body="new event", tok=user1_tok
+                )
+                # Read last event
+                channel = self.make_request(
+                    "POST",
+                    f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_response['event_id']}",
+                    {},
+                    access_token=user1_tok,
+                )
+                self.assertEqual(channel.code, 200, channel.json_body)
+            elif extension_name == "typing":
+                # Start a typing notification
+                channel = self.make_request(
+                    "PUT",
+                    f"/rooms/{room_id}/typing/{user1_id}",
+                    b'{"typing": true, "timeout": 30000}',
+                    access_token=user1_tok,
+                )
+                self.assertEqual(channel.code, 200, channel.json_body)
+            else:
+                assert_never(extension_name)
+
+        main_sync_body = {
+            "lists": {
+                # We expect this list range to include room5 and room4
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    # We set this to `1` because we're testing `receipts` which
+                    # interact with the `timeline`. With receipts, when a room
+                    # hasn't been sent down the connection before or it appears
+                    # as `initial: true`, we only include receipts for events in
+                    # the timeline to avoid bloating and blowing up the sync
+                    # response as the number of users in the room increases.
+                    # (this behavior is part of the spec)
+                    "timeline_limit": 1,
+                },
+                # We expect this list range to include room5, room4, room3
+                "bar-list": {
+                    "ranges": [[0, 2]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                },
+            },
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+        }
+
+        # Mix lists and rooms
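+        # (list names and room IDs that don't exist should simply be ignored rather
+        # than cause an error)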
+        sync_body = {
+            **main_sync_body,
+            "extensions": {
+                extension_name: {
+                    "enabled": True,
+                    "lists": ["foo-list", "non-existent-list"],
+                    "rooms": [room_id1, room_id2, "!non-existent-room"],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # room1: ✅ Requested via `rooms` and a room subscription exists
+        # room2: ❌ Requested via `rooms` but not in the response (from lists or room subscriptions)
+        # room3: ❌ Not requested
+        # room4: ✅ Shows up because requested via `lists` and list exists in the response
+        # room5: ✅ Shows up because requested via `lists` and list exists in the response
+        self.assertIncludes(
+            {
+                room_id_to_human_name_map[room_id]
+                for room_id in response_body["extensions"][extension_name]
+                .get("rooms")
+                .keys()
+            },
+            {"room1", "room4", "room5"},
+            exact=True,
+        )
+
+        # Try wildcards (this is the default)
+        sync_body = {
+            **main_sync_body,
+            "extensions": {
+                extension_name: {
+                    "enabled": True,
+                    # "lists": ["*"],
+                    # "rooms": ["*"],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # room1: ✅ Shows up because of default `rooms` wildcard and is in one of the room subscriptions
+        # room2: ❌ Not requested
+        # room3: ✅ Shows up because of default `lists` wildcard and is in a list
+        # room4: ✅ Shows up because of default `lists` wildcard and is in a list
+        # room5: ✅ Shows up because of default `lists` wildcard and is in a list
+        self.assertIncludes(
+            {
+                room_id_to_human_name_map[room_id]
+                for room_id in response_body["extensions"][extension_name]
+                .get("rooms")
+                .keys()
+            },
+            {"room1", "room3", "room4", "room5"},
+            exact=True,
+        )
+
+        # Empty list will return nothing
+        sync_body = {
+            **main_sync_body,
+            "extensions": {
+                extension_name: {
+                    "enabled": True,
+                    "lists": [],
+                    "rooms": [],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # room1: ❌ Not requested
+        # room2: ❌ Not requested
+        # room3: ❌ Not requested
+        # room4: ❌ Not requested
+        # room5: ❌ Not requested
+        self.assertIncludes(
+            {
+                room_id_to_human_name_map[room_id]
+                for room_id in response_body["extensions"][extension_name]
+                .get("rooms")
+                .keys()
+            },
+            set(),
+            exact=True,
+        )
+
+        # Try wildcard and none
+        sync_body = {
+            **main_sync_body,
+            "extensions": {
+                extension_name: {
+                    "enabled": True,
+                    "lists": ["*"],
+                    "rooms": [],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # room1: ❌ Not requested
+        # room2: ❌ Not requested
+        # room3: ✅ Shows up because of default `lists` wildcard and is in a list
+        # room4: ✅ Shows up because of default `lists` wildcard and is in a list
+        # room5: ✅ Shows up because of default `lists` wildcard and is in a list
+        self.assertIncludes(
+            {
+                room_id_to_human_name_map[room_id]
+                for room_id in response_body["extensions"][extension_name]
+                .get("rooms")
+                .keys()
+            },
+            {"room3", "room4", "room5"},
+            exact=True,
+        )
+
+        # Try requesting a room that is only in a list
+        sync_body = {
+            **main_sync_body,
+            "extensions": {
+                extension_name: {
+                    "enabled": True,
+                    "lists": [],
+                    "rooms": [room_id5],
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # room1: ❌ Not requested
+        # room2: ❌ Not requested
+        # room3: ❌ Not requested
+        # room4: ❌ Not requested
+        # room5: ✅ Requested via `rooms` and is in a list
+        self.assertIncludes(
+            {
+                room_id_to_human_name_map[room_id]
+                for room_id in response_body["extensions"][extension_name]
+                .get("rooms")
+                .keys()
+            },
+            {"room5"},
+            exact=True,
+        )
diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py
new file mode 100644
index 0000000000..c59f6aedc4
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_lists_filters.py
@@ -0,0 +1,1975 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import (
+    EventContentFields,
+    EventTypes,
+    RoomTypes,
+)
+from synapse.api.room_versions import RoomVersions
+from synapse.events import StrippedStateEvent
+from synapse.rest.client import login, room, sync, tags
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncFiltersTestCase(SlidingSyncBase):
+    """
+    Test `filters` in the Sliding Sync API to make sure it includes/excludes rooms
+    correctly.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        tags.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
+        self.account_data_handler = hs.get_account_data_handler()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_multiple_filters_and_multiple_lists(self) -> None:
+        """
+        Test that filters apply to `lists` in various scenarios.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a DM room
+        joined_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=True,
+        )
+        invited_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=False,
+        )
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Create a room that user1 is invited to
+        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                # Absence of filters does not imply "False" values
+                "all": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {},
+                },
+                # Test single truthy filter
+                "dms": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": True},
+                },
+                # Test single falsy filter
+                "non-dms": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": False},
+                },
+                # Test how multiple filters should stack (AND'd together)
+                "room-invites": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {"is_dm": False, "is_invite": True},
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the lists we requested
+        self.assertIncludes(
+            response_body["lists"].keys(),
+            {"all", "dms", "non-dms", "room-invites"},
+            exact=True,
+        )
+
+        # Make sure the lists have the correct rooms
+        self.assertIncludes(
+            set(response_body["lists"]["all"]["ops"][0]["room_ids"]),
+            {
+                invite_room_id,
+                room_id,
+                invited_dm_room_id,
+                joined_dm_room_id,
+            },
+            exact=True,
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["dms"]["ops"][0]["room_ids"]),
+            {invited_dm_room_id, joined_dm_room_id},
+            exact=True,
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["non-dms"]["ops"][0]["room_ids"]),
+            {invite_room_id, room_id},
+            exact=True,
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["room-invites"]["ops"][0]["room_ids"]),
+            {invite_room_id},
+            exact=True,
+        )
+
+    def test_filters_regardless_of_membership_server_left_room(self) -> None:
+        """
+        Test that filters apply to rooms regardless of membership. We're also
+        compounding the problem by having all of the local users leave the room,
+        causing our server to leave the room.
+
+        We want to make sure that if someone is filtering rooms and then leaves, they
+        still get that final update down sync saying that they left.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Create an encrypted space room
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        self.helper.join(space_room_id, user1_id, tok=user1_tok)
+
+        # Make an initial Sliding Sync request
+        sync_body = {
+            "lists": {
+                "all-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {},
+                },
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {
+                        "is_encrypted": True,
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure the response has the lists we requested
+        self.assertIncludes(
+            response_body["lists"].keys(),
+            {"all-list", "foo-list"},
+        )
+
+        # Make sure the lists have the correct rooms
+        self.assertIncludes(
+            set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]),
+            {space_room_id, room_id},
+            exact=True,
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+        # Everyone leaves the encrypted space room
+        self.helper.leave(space_room_id, user1_id, tok=user1_tok)
+        self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+
+        # Make an incremental Sliding Sync request
+        sync_body = {
+            "lists": {
+                "all-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {},
+                },
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                    "filters": {
+                        "is_encrypted": True,
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Make sure the response has the lists we requested
+        self.assertIncludes(
+            response_body["lists"].keys(),
+            {"all-list", "foo-list"},
+            exact=True,
+        )
+
+        # Make sure the lists have the correct rooms even though we `newly_left`
+        self.assertIncludes(
+            set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]),
+            {space_room_id, room_id},
+            exact=True,
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+    def test_filters_is_dm(self) -> None:
+        """
+        Test `filters.is_dm` for DM rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a DM room
+        dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+        )
+
+        # Try with `is_dm=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_dm": True,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {dm_room_id},
+            exact=True,
+        )
+
+        # Try with `is_dm=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_dm": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted(self) -> None:
+        """
+        Test `filters.is_encrypted` for encrypted rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the encrypted room should show up
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the unencrypted room should show up
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_server_left_room(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` against a room that everyone has left.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Get a token before we create any rooms
+        sync_body: JsonDict = {
+            "lists": {},
+        }
+        response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Leave the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+        # Leave the room
+        self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok)
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_server_left_room2(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a room that
+        everyone has left.
+
+        There is still a local user invited to the rooms, but that doesn't affect whether
+        the server is participating in the room (users need to be joined).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        _user2_tok = self.login(user2_id, "pass")
+
+        # Get a token before we create any rooms
+        sync_body: JsonDict = {
+            "lists": {},
+        }
+        response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Invite user2
+        self.helper.invite(room_id, targ=user2_id, tok=user1_tok)
+        # User1 leaves the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+        # Invite user2
+        self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok)
+        # User1 leaves the room
+        self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok)
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_after_we_left(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a room that was
+        encrypted after we left it (make sure we don't just use the current state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Get a token before we create any rooms
+        sync_body: JsonDict = {
+            "lists": {},
+        }
+        response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Join and then leave the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Create a room that will be encrypted
+        encrypted_after_we_left_room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok
+        )
+        # Join and then leave the room
+        self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok)
+        self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok)
+
+        # Encrypt the room after we've left
+        self.helper.send_state(
+            encrypted_after_we_left_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        if self.use_new_tables:
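+            # The new tables snapshot the room state as of when we left (not yet
+            # encrypted), so the room doesn't match `is_encrypted=True`.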
+            self.assertIncludes(
+                set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+                set(),
+                exact=True,
+            )
+        else:
+            # Even though we left the room before it was encrypted, we still see it because
+            # someone else on our server is still participating in the room and we "leak"
+            # the current state to the left user. We don't consider the room's encryption
+            # status to be a secret, given it's usually set at the start of the room and is
+            # one of the stripped state events that is normally handed out.
+            self.assertIncludes(
+                set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+                {encrypted_after_we_left_room_id},
+                exact=True,
+            )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        if self.use_new_tables:
+            self.assertIncludes(
+                set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+                {room_id, encrypted_after_we_left_room_id},
+                exact=True,
+            )
+        else:
+            # Even though we left the room before it was encrypted... (see comment above)
+            self.assertIncludes(
+                set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+                {room_id},
+                exact=True,
+            )
+
+    def test_filters_is_encrypted_with_remote_invite_room_no_stripped_state(
+        self,
+    ) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a remote invite
+        room without any `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room without any `unsigned.invite_room_state`
+        _remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id, None
+        )
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear because we can't figure out whether
+        # it is encrypted or not (no stripped state, `unsigned.invite_room_state`).
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear because we can't figure out whether
+        # it is encrypted or not (no stripped state, `unsigned.invite_room_state`).
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_with_remote_invite_encrypted_room(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a remote invite
+        encrypted room with some `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating that the room is encrypted.
+        remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id,
+            [
+                StrippedStateEvent(
+                    type=EventTypes.Create,
+                    state_key="",
+                    sender="@inviter:remote_server",
+                    content={
+                        EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                        EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                    },
+                ),
+                StrippedStateEvent(
+                    type=EventTypes.RoomEncryption,
+                    state_key="",
+                    sender="@inviter:remote_server",
+                    content={
+                        EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                    },
+                ),
+            ],
+        )
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should appear here because it is encrypted
+        # according to the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id, remote_invite_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear here because it is encrypted
+        # according to the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_with_remote_invite_unencrypted_room(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a remote invite
+        unencrypted room with some `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # but don't set any room encryption event.
+        remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id,
+            [
+                StrippedStateEvent(
+                    type=EventTypes.Create,
+                    state_key="",
+                    sender="@inviter:remote_server",
+                    content={
+                        EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                        EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                    },
+                ),
+                # No room encryption event
+            ],
+        )
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear here because it is unencrypted
+        # according to the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should appear because it is unencrypted according to
+        # the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id, remote_invite_room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_updated(self) -> None:
+        """
+        Make sure a joined room shows up when its encryption status is updated
+        (`filters.is_encrypted`)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # No rooms are encrypted yet
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+        # Update the encryption status
+        self.helper.send_state(
+            room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # We should see the room now because it's encrypted
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_invite_rooms(self) -> None:
+        """
+        Test `filters.is_invite` for rooms that the user has been invited to
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Create a room that user1 is invited to
+        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Try with `is_invite=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_invite": True,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {invite_room_id},
+            exact=True,
+        )
+
+        # Try with `is_invite=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_invite": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_room_types(self) -> None:
+        """
+        Test `filters.room_types` for different room types
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Create an arbitrarily typed room
+        foo_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {
+                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
+                }
+            },
+        )
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+        # Try finding normal rooms and spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None, RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id, space_room_id},
+            exact=True,
+        )
+
+        # Try finding an arbitrary room type
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": ["org.matrix.foobarbaz"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {foo_room_id},
+            exact=True,
+        )
+
+        # Just make sure we know what happens when you specify an empty list of room_types
+        # (we should find nothing)
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+    def test_filters_not_room_types(self) -> None:
+        """
+        Test `filters.not_room_types` for different room types
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Create an arbitrarily typed room
+        foo_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {
+                    EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz"
+                }
+            },
+        )
+
+        # Try finding *NOT* normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id, foo_room_id},
+            exact=True,
+        )
+
+        # Try finding *NOT* spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id, foo_room_id},
+            exact=True,
+        )
+
+        # Try finding *NOT* normal rooms or spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_room_types": [None, RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {foo_room_id},
+            exact=True,
+        )
+
+        # Test how it behaves when we have both `room_types` and `not_room_types`.
+        # `not_room_types` should win.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                        "not_room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # Nothing matches because nothing is both a normal room and not a normal room
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+        # Test how it behaves when we have both `room_types` and `not_room_types`.
+        # `not_room_types` should win.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None, RoomTypes.SPACE],
+                        "not_room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
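+        # Only the space survives: `room_types` allows normal rooms and spaces, but
+        # `not_room_types` takes precedence and excludes the normal rooms.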
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+        # Just make sure we know what happens when you specify an empty list of not_room_types
+        # (we should find all of the rooms)
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_room_types": [],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id, foo_room_id, space_room_id},
+            exact=True,
+        )
+
+    def test_filters_room_types_server_left_room(self) -> None:
+        """
+        Test that we can apply a `filters.room_types` filter against a room that everyone
+        has left.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Get a token before we create any rooms
+        sync_body: JsonDict = {
+            "lists": {},
+        }
+        response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Leave the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Leave the room
+        self.helper.leave(space_room_id, user1_id, tok=user1_tok)
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+    def test_filter_room_types_server_left_room2(self) -> None:
+        """
+        Test that we can apply a `filters.room_types` filter against a room that everyone
+        has left.
+
+        There is still a local user invited to the rooms, but that doesn't affect whether
+        the server is participating in the room (users need to be joined).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        _user2_tok = self.login(user2_id, "pass")
+
+        # Get a token before we create any rooms
+        sync_body: JsonDict = {
+            "lists": {},
+        }
+        response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Invite user2
+        self.helper.invite(room_id, targ=user2_id, tok=user1_tok)
+        # User1 leaves the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Invite user2
+        self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok)
+        # User1 leaves the room
+        self.helper.leave(space_room_id, user1_id, tok=user1_tok)
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        # Use an incremental sync so that the room is considered `newly_left` and shows
+        # up in the sync response
+        response_body, _ = self.do_sync(
+            sync_body, since=before_rooms_token, tok=user1_tok
+        )
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+    def test_filters_room_types_with_remote_invite_room_no_stripped_state(self) -> None:
+        """
+        Test that we can apply a `filters.room_types` filter against a remote invite
+        room without any `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room without any `unsigned.invite_room_state`
+        _remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id, None
+        )
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        # `remote_invite_room_id` should not appear because we can't figure out what
+        # room type it is (no stripped state, `unsigned.invite_room_state`)
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        # `remote_invite_room_id` should not appear because we can't figure out what
+        # room type it is (no stripped state, `unsigned.invite_room_state`)
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+    def test_filters_room_types_with_remote_invite_space(self) -> None:
+        """
+        Test that we can apply a `filters.room_types` filter against a remote invite
+        to a space room with some `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state` indicating
+        # that it is a space room
+        remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id,
+            [
+                StrippedStateEvent(
+                    type=EventTypes.Create,
+                    state_key="",
+                    sender="@inviter:remote_server",
+                    content={
+                        EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                        EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        # Specify that it is a space room
+                        EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+                    },
+                ),
+            ],
+        )
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear here because it is a space room
+        # according to the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should appear here because it is a space room
+        # according to the stripped state
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id, remote_invite_room_id},
+            exact=True,
+        )
+
+    def test_filters_room_types_with_remote_invite_normal_room(self) -> None:
+        """
+        Test that we can apply a `filters.room_types` filter against a remote invite
+        to a normal room with some `unsigned.invite_room_state` (stripped state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # but the create event does not specify a room type (normal room)
+        remote_invite_room_id = self._create_remote_invite_room_for_user(
+            user1_id,
+            [
+                StrippedStateEvent(
+                    type=EventTypes.Create,
+                    state_key="",
+                    sender="@inviter:remote_server",
+                    content={
+                        EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                        EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        # No room type means this is a normal room
+                    },
+                ),
+            ],
+        )
+
+        # Create a normal room (no room type)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+
+        # Try finding only normal rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [None],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should appear here because it is a normal room
+        # according to the stripped state (no room type)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id, remote_invite_room_id},
+            exact=True,
+        )
+
+        # Try finding only spaces
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # `remote_invite_room_id` should not appear here because it is a normal room
+        # according to the stripped state (no room type)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {space_room_id},
+            exact=True,
+        )
+
+    def _add_tag_to_room(
+        self, *, room_id: str, user_id: str, access_token: str, tag_name: str
+    ) -> None:
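+        # Tag the room on the user's behalf via the client tags API
+        # (`PUT /user/{user_id}/rooms/{room_id}/tags/{tag_name}`).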
+        channel = self.make_request(
+            method="PUT",
+            path=f"/user/{user_id}/rooms/{room_id}/tags/{tag_name}",
+            content={},
+            access_token=access_token,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+    def test_filters_tags(self) -> None:
+        """
+        Test `filters.tags` for rooms with given tags
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room with no tags
+        self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create some rooms with tags
+        foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Create a room without multiple tags
+        foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Add the "foo" tag to the foo room
+        self._add_tag_to_room(
+            room_id=foo_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="foo",
+        )
+        # Add the "bar" tag to the bar room
+        self._add_tag_to_room(
+            room_id=bar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="bar",
+        )
+        # Add both "foo" and "bar" tags to the foobar room
+        self._add_tag_to_room(
+            room_id=foobar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="foo",
+        )
+        self._add_tag_to_room(
+            room_id=foobar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="bar",
+        )
+
+        # Try finding rooms with the "foo" tag
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "tags": ["foo"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {foo_room_id, foobar_room_id},
+            exact=True,
+        )
+
+        # Try finding rooms with either "foo" or "bar" tags
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "tags": ["foo", "bar"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {foo_room_id, bar_room_id, foobar_room_id},
+            exact=True,
+        )
+
+        # Try with a random tag we didn't add
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "tags": ["flomp"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # No rooms should match
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+        # Just make sure we know what happens when you specify an empty list of tags
+        # (we should find nothing)
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "tags": [],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+    def test_filters_not_tags(self) -> None:
+        """
+        Test `filters.not_tags` for excluding rooms with given tags
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room with no tags
+        untagged_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create some rooms with tags
+        foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Create a room with multiple tags
+        foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Add the "foo" tag to the foo room
+        self._add_tag_to_room(
+            room_id=foo_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="foo",
+        )
+        # Add the "bar" tag to the bar room
+        self._add_tag_to_room(
+            room_id=bar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="bar",
+        )
+        # Add both "foo" and "bar" tags to the foobar room
+        self._add_tag_to_room(
+            room_id=foobar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="foo",
+        )
+        self._add_tag_to_room(
+            room_id=foobar_room_id,
+            user_id=user1_id,
+            access_token=user1_tok,
+            tag_name="bar",
+        )
+
+        # Try finding rooms without the "foo" tag
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_tags": ["foo"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {untagged_room_id, bar_room_id},
+            exact=True,
+        )
+
+        # Try finding rooms without either "foo" or "bar" tags
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_tags": ["foo", "bar"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {untagged_room_id},
+            exact=True,
+        )
+
+        # Test how it behaves when we have both `tags` and `not_tags`.
+        # `not_tags` should win.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "tags": ["foo"],
+                        "not_tags": ["foo"],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # Nothing matches because nothing is both tagged with "foo" and not tagged with "foo"
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+        # Just make sure we know what happens when you specify an empty list of not_tags
+        # (we should find all of the rooms)
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "not_tags": [],
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {untagged_room_id, foo_room_id, bar_room_id, foobar_room_id},
+            exact=True,
+        )
diff --git a/tests/rest/client/sliding_sync/test_room_subscriptions.py b/tests/rest/client/sliding_sync/test_room_subscriptions.py
new file mode 100644
index 0000000000..285fdaaf78
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_room_subscriptions.py
@@ -0,0 +1,303 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+from http import HTTPStatus
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes, HistoryVisibility
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
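+    # Name the generated classes `<TestCase>_new` / `<TestCase>_fallback` so it is
+    # obvious which set of tables a failing test was using.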
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase):
+    """
+    Test `room_subscriptions` in the Sliding Sync API.
+    """
+
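+    # A minimal sketch of the request shape exercised in the tests below (room ID and
+    # values are illustrative, not a definitive schema):
+    #
+    #     {
+    #         "room_subscriptions": {
+    #             "!room:server": {
+    #                 "required_state": [["m.room.create", ""]],
+    #                 "timeline_limit": 1,
+    #             }
+    #         }
+    #     }
+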
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_room_subscriptions_with_join_membership(self) -> None:
+        """
+        Test `room_subscriptions` with a joined room should give us timeline and current
+        state events.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request with just the room subscription
+        sync_body = {
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # We should see some state
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+        # We should see some events
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                join_response["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+        # No "live" events in an initial sync (no `from_token` to define the "live"
+        # range)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+        # There are more events to paginate to
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_room_subscriptions_with_leave_membership(self) -> None:
+        """
+        Test `room_subscriptions` with a leave room should give us timeline and state
+        events up to the leave event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Send some events after user1 leaves
+        self.helper.send(room_id1, "activity after leave", tok=user2_tok)
+        # Update state after user1 leaves
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "qux"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with just the room subscription
+        sync_body = {
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [
+                        ["org.matrix.foo_state", ""],
+                    ],
+                    "timeline_limit": 2,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should see the state at the time of the leave
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[("org.matrix.foo_state", "")],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+        # We should see some events from before we left (nothing after)
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            [
+                join_response["event_id"],
+                leave_response["event_id"],
+            ],
+            response_body["rooms"][room_id1]["timeline"],
+        )
+        # No "live" events in an initial sync (no `from_token` to define the "live"
+        # range)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+        # There are more events to paginate to
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_room_subscriptions_no_leak_private_room(self) -> None:
+        """
+        Test `room_subscriptions` with a private room we have never been in should not
+        leak any data to the user.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=False)
+
+        # We should not be able to join the private room
+        self.helper.join(
+            room_id1, user1_id, tok=user1_tok, expect_code=HTTPStatus.FORBIDDEN
+        )
+
+        # Make the Sliding Sync request with just the room subscription
+        sync_body = {
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should not see the room at all (we're not in it)
+        self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])
+
+    def test_room_subscriptions_world_readable(self) -> None:
+        """
+        Test `room_subscriptions` with a room that has `world_readable` history visibility
+
+        FIXME: We should be able to see the room timeline and state
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a room with `world_readable` history visibility
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "preset": "public_chat",
+                "initial_state": [
+                    {
+                        "content": {
+                            "history_visibility": HistoryVisibility.WORLD_READABLE
+                        },
+                        "state_key": "",
+                        "type": EventTypes.RoomHistoryVisibility,
+                    }
+                ],
+            },
+        )
+        # Ensure we're testing with a room with `world_readable` history visibility
+        # which means events are visible to anyone even without membership.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.WORLD_READABLE,
+        )
+
+        # Note: We never join the room
+
+        # Make the Sliding Sync request with just the room subscription
+        sync_body = {
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # FIXME: In the future, we should be able to see the room because it's
+        # `world_readable` but currently we don't support this.
+        self.assertIsNone(response_body["rooms"].get(room_id1), response_body["rooms"])
diff --git a/tests/rest/client/sliding_sync/test_rooms_invites.py b/tests/rest/client/sliding_sync/test_rooms_invites.py
new file mode 100644
index 0000000000..882762ca29
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_rooms_invites.py
@@ -0,0 +1,528 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes, HistoryVisibility
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.types import UserID
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase):
+    """
+    Test to make sure the `rooms` response looks good for invites in the Sliding Sync API.
+
+    Invites behave quite differently from other rooms because we don't include the
+    `timeline` (`num_live`, `limited`, `prev_batch`) or `required_state`; instead we
+    return some stripped state under the `invite_state` key.
+
+    Knocks probably have the same behavior but the spec doesn't mention knocks yet.
+    """
+
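+    # Rough shape of an invite room entry in the responses asserted below (values are
+    # illustrative): the entry carries only stripped events under `invite_state`, e.g.
+    #
+    #     "invite_state": [
+    #         {"type": "m.room.create", "state_key": "", "sender": "@user2:test", "content": {...}},
+    #         {"type": "m.room.member", "state_key": "@user1:test", "sender": "@user2:test",
+    #          "content": {"membership": "invite", ...}},
+    #         ...
+    #     ]
+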
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_rooms_invite_shared_history_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visiblity is
+        `shared` and we haven't joined the room yet.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility which means
+        # history isn't visible until you actually join the room.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.SHARED,
+        )
+
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("timeline"),
+            response_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("num_live"),
+            response_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("limited"),
+            response_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("prev_batch"),
+            response_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("required_state"),
+            response_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            response_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            response_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_shared_history_incremental_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        incremental sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but we also shouldn't see any timeline events because the history visibility is
+        `shared` and we haven't joined the room yet.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Ensure we're testing with a room with `shared` history visibility which means
+        # history isn't visible until you actually join the room.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.SHARED,
+        )
+
+        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after token6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("timeline"),
+            response_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("num_live"),
+            response_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("limited"),
+            response_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("prev_batch"),
+            response_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("required_state"),
+            response_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            response_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            response_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        initial sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but depending on the semantics we decide, we could potentially see some
+        historical events because the history visibility is `world_readable`. The same
+        goes for events sent after the invite if the history visibility was set to
+        `invited`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "preset": "public_chat",
+                "initial_state": [
+                    {
+                        "content": {
+                            "history_visibility": HistoryVisibility.WORLD_READABLE
+                        },
+                        "state_key": "",
+                        "type": EventTypes.RoomHistoryVisibility,
+                    }
+                ],
+            },
+        )
+        # Ensure we're testing with a room with `world_readable` history visibility
+        # which means events are visible to anyone even without membership.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.WORLD_READABLE,
+        )
+
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    # Large enough to see the latest events and before the invite
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("timeline"),
+            response_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("num_live"),
+            response_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("limited"),
+            response_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("prev_batch"),
+            response_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("required_state"),
+            response_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            response_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            response_body["rooms"][room_id1]["invite_state"],
+        )
+
+    def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
+        """
+        Test that `rooms` we are invited to have some stripped `invite_state` during an
+        incremental sync.
+
+        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
+        but depending on the semantics we decide, we could potentially see some
+        historical events before/after the `from_token` because the history is
+        `world_readable`. Same situation for events after the `from_token` if the
+        history visibility was set to `invited`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user1 = UserID.from_string(user1_id)
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user2 = UserID.from_string(user2_id)
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "preset": "public_chat",
+                "initial_state": [
+                    {
+                        "content": {
+                            "history_visibility": HistoryVisibility.WORLD_READABLE
+                        },
+                        "state_key": "",
+                        "type": EventTypes.RoomHistoryVisibility,
+                    }
+                ],
+            },
+        )
+        # Ensure we're testing with a room with `world_readable` history visibility
+        # which means events are visible to anyone even without membership.
+        history_visibility_response = self.helper.get_state(
+            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
+        )
+        self.assertEqual(
+            history_visibility_response.get("history_visibility"),
+            HistoryVisibility.WORLD_READABLE,
+        )
+
+        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
+        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    # Large enough to see the latest events and before the invite
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after token6", tok=user2_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # `timeline` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("timeline"),
+            response_body["rooms"][room_id1],
+        )
+        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("num_live"),
+            response_body["rooms"][room_id1],
+        )
+        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("limited"),
+            response_body["rooms"][room_id1],
+        )
+        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("prev_batch"),
+            response_body["rooms"][room_id1],
+        )
+        # `required_state` is omitted for `invite` rooms with `stripped_state`
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("required_state"),
+            response_body["rooms"][room_id1],
+        )
+        # We should have some `stripped_state` so the potential joiner can identify the
+        # room (we don't care about the order).
+        self.assertCountEqual(
+            response_body["rooms"][room_id1]["invite_state"],
+            [
+                {
+                    "content": {"creator": user2_id, "room_version": "10"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.create",
+                },
+                {
+                    "content": {"join_rule": "public"},
+                    "sender": user2_id,
+                    "state_key": "",
+                    "type": "m.room.join_rules",
+                },
+                {
+                    "content": {"displayname": user2.localpart, "membership": "join"},
+                    "sender": user2_id,
+                    "state_key": user2_id,
+                    "type": "m.room.member",
+                },
+                {
+                    "content": {"displayname": user1.localpart, "membership": "invite"},
+                    "sender": user2_id,
+                    "state_key": user1_id,
+                    "type": "m.room.member",
+                },
+            ],
+            response_body["rooms"][room_id1]["invite_state"],
+        )
diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
new file mode 100644
index 0000000000..0a8b2c02c2
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -0,0 +1,1338 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized, parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventContentFields, EventTypes, Membership
+from synapse.api.room_versions import RoomVersions
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.test_utils.event_injection import create_event
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
+    """
+    Test rooms meta info like name, avatar, joined_count, invited_count, is_dm,
+    bump_stamp in the Sliding Sync API.
+    """
+
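+    # The meta fields asserted on below sit at the top level of each room entry,
+    # roughly (values illustrative):
+    #
+    #     response_body["rooms"][room_id] == {
+    #         "name": "my super room",
+    #         "avatar": "mxc://DUMMY_MEDIA_ID",
+    #         "joined_count": 2,
+    #         "invited_count": 0,
+    #         # `heroes` only appears when the room has no name; each entry has at
+    #         # least a `user_id`.
+    #         "heroes": [{"user_id": "@user2:test"}, ...],
+    #         ...
+    #     }
+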
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        self.state_handler = self.hs.get_state_handler()
+        persistence = self.hs.get_storage_controllers().persistence
+        assert persistence is not None
+        self.persistence = persistence
+
+        super().prepare(reactor, clock, hs)
+
+    def test_rooms_meta_when_joined_initial(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` are included in the initial sync
+        response and reflect the current state of the room when the user is joined to
+        the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Reflect the current state of the room
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["name"],
+            "my super room",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["avatar"],
+            "mxc://DUMMY_MEDIA_ID",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["joined_count"],
+            2,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["invited_count"],
+            0,
+        )
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+        )
+
+    def test_rooms_meta_when_joined_incremental_no_change(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` aren't included in an incremental sync
+        response if they haven't changed.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    # This needs to be set to one so the `RoomResult` isn't empty and
+                    # the room comes down incremental sync when we send a new message.
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Send a message to make the room come down sync
+        self.helper.send(room_id1, "message in room1", tok=user2_tok)
+
+        # Incremental sync
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should only see changed meta info (nothing changed so we shouldn't see any
+        # of these fields)
+        self.assertNotIn(
+            "initial",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "name",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "avatar",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+        )
+
+    @parameterized.expand(
+        [
+            ("in_required_state", True),
+            ("not_in_required_state", False),
+        ]
+    )
+    def test_rooms_meta_when_joined_incremental_with_state_change(
+        self, test_description: str, include_changed_state_in_required_state: bool
+    ) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` are included in an incremental sync
+        response if they changed.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": (
+                        [[EventTypes.Name, ""], [EventTypes.RoomAvatar, ""]]
+                        # Conditionally include the changed state in the
+                        # `required_state` to make sure whether we request it or not,
+                        # the new room name still flows down to the client.
+                        if include_changed_state_in_required_state
+                        else []
+                    ),
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Update the room name
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {EventContentFields.ROOM_NAME: "my super duper room"},
+            tok=user2_tok,
+        )
+        # Update the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID_UPDATED"},
+            tok=user2_tok,
+        )
+
+        # Incremental sync
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should only see changed meta info (the room name and avatar)
+        self.assertNotIn(
+            "initial",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["name"],
+            "my super duper room",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["avatar"],
+            "mxc://DUMMY_MEDIA_ID_UPDATED",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+        )
+
+    def test_rooms_meta_when_invited(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` are included in the response and
+        reflect the current state of the room when the user is invited to the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        # User1 is invited to the room
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Update the room name after user1 was invited
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Update the room avatar URL after user1 was invited
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # This should still reflect the current state of the room even when the user is
+        # invited.
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["name"],
+            "my super duper room",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["avatar"],
+            "mxc://UPDATED_DUMMY_MEDIA_ID",
+            response_body["rooms"][room_id1],
+        )
+
+        # We don't give extra room information to invitees
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
+        )
+
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+        )
+
+    def test_rooms_meta_when_banned(self) -> None:
+        """
+        Test that the `rooms` `name` and `avatar` reflect the state of the room when the
+        user was banned (do not leak current state).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        # Set the room avatar URL
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Update the room name after user1 was banned
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Update the room avatar URL after user1 was banned
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomAvatar,
+            {"url": "mxc://UPDATED_DUMMY_MEDIA_ID"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Reflect the state of the room at the time of leaving
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["name"],
+            "my super room",
+            response_body["rooms"][room_id1],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["avatar"],
+            "mxc://DUMMY_MEDIA_ID",
+            response_body["rooms"][room_id1],
+        )
+
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+        )
+
+    def test_rooms_meta_heroes(self) -> None:
+        """
+        Test that the `rooms` `heroes` are included in the response when the room
+        doesn't have a room name set.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        room_id2 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room2",
+            },
+        )
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id2, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Room1 has a name so we shouldn't see any `heroes`, which the client would use
+        # to calculate the room name themselves.
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["name"],
+            "my super room",
+            response_body["rooms"][room_id1],
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("heroes"))
+        self.assertEqual(
+            response_body["rooms"][room_id1]["joined_count"],
+            2,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["invited_count"],
+            1,
+        )
+
+        # Room2 doesn't have a name so we should see `heroes` populated
+        self.assertEqual(response_body["rooms"][room_id2]["initial"], True)
+        self.assertIsNone(response_body["rooms"][room_id2].get("name"))
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in response_body["rooms"][room_id2].get("heroes", [])
+            ],
+            # Heroes shouldn't include the user themselves (we shouldn't see user1)
+            [user2_id, user3_id],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id2]["joined_count"],
+            2,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id2]["invited_count"],
+            1,
+        )
+
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+        self.assertIsNone(response_body["rooms"][room_id2].get("required_state"))
+
+    def test_rooms_meta_heroes_max(self) -> None:
+        """
+        Test that the `rooms` `heroes` only includes the first 5 users (not including
+        yourself).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+        user6_id = self.register_user("user6", "pass")
+        user6_tok = self.login(user6_id, "pass")
+        user7_id = self.register_user("user7", "pass")
+        user7_tok = self.login(user7_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room",
+            },
+        )
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        self.helper.join(room_id1, user5_id, tok=user5_tok)
+        self.helper.join(room_id1, user6_id, tok=user6_tok)
+        self.helper.join(room_id1, user7_id, tok=user7_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # The room doesn't have a name so we should see `heroes` populated
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertIsNone(response_body["rooms"][room_id1].get("name"))
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in response_body["rooms"][room_id1].get("heroes", [])
+            ],
+            # Heroes should be the first 5 users in the room (excluding the user
+            # themselves, we shouldn't see `user1`)
+            [user2_id, user3_id, user4_id, user5_id, user6_id],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["joined_count"],
+            7,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id1]["invited_count"],
+            0,
+        )
+
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+    def test_rooms_meta_heroes_when_banned(self) -> None:
+        """
+        Test that the `rooms` `heroes` are included in the response when the room
+        doesn't have a room name set but doesn't leak information past their ban.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        _user5_tok = self.login(user5_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room",
+            },
+        )
+        # User1 joins the room
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # User1 is banned from the room
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # User4 joins the room after user1 is banned
+        self.helper.join(room_id1, user4_id, tok=user4_tok)
+        # User5 is invited after user1 is banned
+        self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Room doesn't have a name so we should see `heroes` populated
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        self.assertIsNone(response_body["rooms"][room_id1].get("name"))
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in response_body["rooms"][room_id1].get("heroes", [])
+            ],
+            # Heroes shouldn't include the user themselves (we shouldn't see user1). We
+            # also shouldn't see user4 since they joined after user1 was banned.
+            #
+            # FIXME: The actual result should be `[user2_id, user3_id]` but we currently
+            # don't support this for rooms where the user has left/been banned.
+            [],
+        )
+
+        # FIXME: We possibly want to return joined and invited counts for rooms
+        # you're banned from
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id1],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_meta_heroes_incremental_sync_no_change(self) -> None:
+        """
+        Test that the `rooms` `heroes` aren't included in an incremental sync
+        response if they haven't changed.
+
+        (when the room doesn't have a room name set)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+
+        room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room2",
+            },
+        )
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    # This needs to be set to one so the `RoomResult` isn't empty and
+                    # the room comes down incremental sync when we send a new message.
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Send a message to make the room come down sync
+        self.helper.send(room_id, "message in room", tok=user2_tok)
+
+        # Incremental sync
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # This is an incremental sync and the second time we have seen this room so it
+        # isn't `initial`
+        self.assertNotIn(
+            "initial",
+            response_body["rooms"][room_id],
+        )
+        # Room shouldn't have a room name because we're testing the `heroes` field which
+        # only has a chance to appear if the room doesn't have a name.
+        self.assertNotIn(
+            "name",
+            response_body["rooms"][room_id],
+        )
+        # No change to heroes
+        self.assertNotIn(
+            "heroes",
+            response_body["rooms"][room_id],
+        )
+        # No change to member counts
+        self.assertNotIn(
+            "joined_count",
+            response_body["rooms"][room_id],
+        )
+        self.assertNotIn(
+            "invited_count",
+            response_body["rooms"][room_id],
+        )
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertNotIn(
+            "required_state",
+            response_body["rooms"][room_id],
+        )
+
+    def test_rooms_meta_heroes_incremental_sync_with_membership_change(self) -> None:
+        """
+        Test that the `rooms` `heroes` are included in an incremental sync response if
+        the membership has changed.
+
+        (when the room doesn't have a room name set)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                # No room name set so that `heroes` is populated
+                #
+                # "name": "my super room2",
+            },
+        )
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User3 is invited
+        self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # User3 joins (membership change)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+
+        # Incremental sync
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # This is an incremental sync and the second time we have seen this room so it
+        # isn't `initial`
+        self.assertNotIn(
+            "initial",
+            response_body["rooms"][room_id],
+        )
+        # Room shouldn't have a room name because we're testing the `heroes` field which
+        # only has a chance to appear if the room doesn't have a name.
+        self.assertNotIn(
+            "name",
+            response_body["rooms"][room_id],
+        )
+        # Membership change so we should see heroes and membership counts
+        self.assertCountEqual(
+            [
+                hero["user_id"]
+                for hero in response_body["rooms"][room_id].get("heroes", [])
+            ],
+            # Heroes shouldn't include the user themselves (we shouldn't see user1)
+            [user2_id, user3_id],
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id]["joined_count"],
+            3,
+        )
+        self.assertEqual(
+            response_body["rooms"][room_id]["invited_count"],
+            0,
+        )
+        # We didn't request any state so we shouldn't see any `required_state`
+        self.assertNotIn(
+            "required_state",
+            response_body["rooms"][room_id],
+        )
+
+    def test_rooms_bump_stamp(self) -> None:
+        """
+        Test that `bump_stamp` is present and pointing to relevant events.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+        event_response1 = message_response = self.helper.send(
+            room_id1, "message in room1", tok=user1_tok
+        )
+        event_pos1 = self.get_success(
+            self.store.get_position_for_event(event_response1["event_id"])
+        )
+        room_id2 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+        send_response2 = self.helper.send(room_id2, "message in room2", tok=user1_tok)
+        event_pos2 = self.get_success(
+            self.store.get_position_for_event(send_response2["event_id"])
+        )
+
+        # Send a reaction in room1 but it shouldn't affect the `bump_stamp`
+        # because reactions are not part of the `DEFAULT_BUMP_EVENT_TYPES`
+        self.helper.send_event(
+            room_id1,
+            type=EventTypes.Reaction,
+            content={
+                "m.relates_to": {
+                    "event_id": message_response["event_id"],
+                    "key": "👍",
+                    "rel_type": "m.annotation",
+                }
+            },
+            tok=user1_tok,
+        )
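+        # For reference: `DEFAULT_BUMP_EVENT_TYPES` is assumed to cover message-like
+        # events (e.g. `m.room.message`, `m.room.encrypted`); `m.reaction` is
+        # deliberately not in that set, which is what this test relies on.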
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 100,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+
+        # Make sure the list includes the rooms in the right order
+        self.assertEqual(
+            len(response_body["lists"]["foo-list"]["ops"]),
+            1,
+            response_body["lists"]["foo-list"],
+        )
+        op = response_body["lists"]["foo-list"]["ops"][0]
+        self.assertEqual(op["op"], "SYNC")
+        self.assertEqual(op["range"], [0, 1])
+        # Note that we don't sort the rooms when the range includes all of the rooms, so
+        # we just assert that the rooms are included
+        self.assertIncludes(set(op["room_ids"]), {room_id1, room_id2}, exact=True)
+
+        # The `bump_stamp` for room1 should point at the latest message (not the
+        # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["bump_stamp"],
+            event_pos1.stream,
+            response_body["rooms"][room_id1],
+        )
+
+        # The `bump_stamp` for room2 should point at the latest message
+        self.assertEqual(
+            response_body["rooms"][room_id2]["bump_stamp"],
+            event_pos2.stream,
+            response_body["rooms"][room_id2],
+        )
+
+    def test_rooms_bump_stamp_backfill(self) -> None:
+        """
+        Test that `bump_stamp` ignores backfilled events, i.e. events with a
+        negative stream ordering.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote room
+        creator = "@user:other"
+        room_id = "!foo:other"
+        room_version = RoomVersions.V10
+        shared_kwargs = {
+            "room_id": room_id,
+            "room_version": room_version.identifier,
+        }
+
+        create_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[],
+                type=EventTypes.Create,
+                state_key="",
+                content={
+                    # The `ROOM_CREATOR` field could be removed if we used a room
+                    # version > 10 (in favor of relying on `sender`)
+                    EventContentFields.ROOM_CREATOR: creator,
+                    EventContentFields.ROOM_VERSION: room_version.identifier,
+                },
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        creator_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[create_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=creator,
+                content={"membership": Membership.JOIN},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        # We add a message event as a valid "bump type"
+        msg_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[creator_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Message,
+                content={"body": "foo", "msgtype": "m.text"},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        invite_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[msg_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.INVITE},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+
+        remote_events_and_contexts = [
+            create_tuple,
+            creator_tuple,
+            msg_tuple,
+            invite_tuple,
+        ]
+
+        # Ensure the local HS knows the room version
+        self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+        # Persist these events as backfilled events.
+        for event, context in remote_events_and_contexts:
+            self.get_success(
+                self.persistence.persist_event(event, context, backfilled=True)
+            )
+
+        # Now we join the local user to the room. We want to make this feel as close to
+        # the real `process_remote_join()` as possible but we'd like to avoid some of
+        # the auth checks that would be done in the real code.
+        #
+        # FIXME: The test was originally written using this less-real
+        # `persist_event(...)` shortcut but it would be nice to use the real remote join
+        # process in a `FederatingHomeserverTestCase`.
+        flawed_join_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[invite_tuple[0].event_id],
+                # This doesn't work correctly to create an `EventContext` that includes
+                # both of these state events. I assume it's because we're working on our
+                # local homeserver which has the remote state set as `outlier`. We have
+                # to create our own EventContext below to get this right.
+                auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.JOIN},
+                sender=user1_id,
+                **shared_kwargs,
+            )
+        )
+        # We have to create our own context to get the state set correctly. If we use
+        # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+        # table will only have the join event in it which should never happen in our
+        # real server.
+        join_event = flawed_join_tuple[0]
+        join_context = self.get_success(
+            self.state_handler.compute_event_context(
+                join_event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id
+                    for e in [create_tuple[0], invite_tuple[0]]
+                },
+                partial_state=False,
+            )
+        )
+        self.get_success(self.persistence.persist_event(join_event, join_context))
+
+        # Doing an SS request should return a positive `bump_stamp`, even though
+        # the only event that matches the bump types has a negative stream
+        # ordering.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 5,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0)
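+        # (The positive value presumably comes from falling back to a locally
+        # persisted event position, such as the local user's join, once all of the
+        # backfilled, negatively-ordered bump events have been ignored.)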
+
+    def test_rooms_bump_stamp_no_change_incremental(self) -> None:
+        """Test that the bump stamp is omitted if there has been no change"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 100,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Initial sync so we expect to see a bump stamp
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+
+        # Send an event that is not in the bump events list
+        self.helper.send_event(
+            room_id1, type="org.matrix.test", content={}, tok=user1_tok
+        )
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # There hasn't been a change to the bump stamps, so we ignore it
+        self.assertNotIn("bump_stamp", response_body["rooms"][room_id1])
+
+    def test_rooms_bump_stamp_change_incremental(self) -> None:
+        """Test that the bump stamp is included if there has been a change, even
+        if it's not in the timeline"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 2,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Initial sync so we expect to see a bump stamp
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+        first_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"]
+
+        # Send a bump event at the start.
+        self.helper.send(room_id1, "test", tok=user1_tok)
+
+        # Send events that are not in the bump events list to fill the timeline
+        for _ in range(5):
+            self.helper.send_event(
+                room_id1, type="org.matrix.test", content={}, tok=user1_tok
+            )
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # There was a bump event in the timeline gap, so we should see the bump
+        # stamp be updated.
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+        second_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"]
+
+        self.assertGreater(second_bump_stamp, first_bump_stamp)
+
+    def test_rooms_bump_stamp_invites(self) -> None:
+        """
+        Test that `bump_stamp` is present and points to the membership event,
+        and not later events, for non-joined rooms
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+        )
+
+        # Invite user1 to the room
+        invite_response = self.helper.invite(room_id, user2_id, user1_id, tok=user2_tok)
+
+        # More messages happen after the invite
+        self.helper.send(room_id, "message in room1", tok=user2_tok)
+
+        # We expect the bump_stamp to match the invite.
+        invite_pos = self.get_success(
+            self.store.get_position_for_event(invite_response["event_id"])
+        )
+
+        # Doing an SS request should return a `bump_stamp` of the invite event,
+        # rather than the message that was sent after.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 5,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        self.assertEqual(
+            response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream
+        )
+
+    def test_rooms_meta_is_dm(self) -> None:
+        """
+        Test `rooms` `is_dm` is correctly set for DM rooms.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a DM room
+        joined_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=True,
+        )
+        invited_dm_room_id = self._create_dm_room(
+            inviter_user_id=user1_id,
+            inviter_tok=user1_tok,
+            invitee_user_id=user2_id,
+            invitee_tok=user2_tok,
+            should_join_room=False,
+        )
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Create a room that user1 is invited to
+        invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Ensure DM's are correctly marked
+        self.assertDictEqual(
+            {
+                room_id: room.get("is_dm")
+                for room_id, room in response_body["rooms"].items()
+            },
+            {
+                invite_room_id: None,
+                room_id: None,
+                invited_dm_room_id: True,
+                joined_dm_room_id: True,
+            },
+        )
+
+    def test_old_room_with_unknown_room_version(self) -> None:
+        """Test that an old room with unknown room version does not break
+        sync."""
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # We first create a standard room, then we'll change the room version in
+        # the DB.
+        room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+
+        # Poke the database and update the room version to an unknown one.
+        self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_update(
+                "rooms",
+                keyvalues={"room_id": room_id},
+                updatevalues={"room_version": "unknown-room-version"},
+                desc="updated-room-version",
+            )
+        )
+
+        # Invalidate the cache so that the method returns the updated room version
+        # instead of the stale cached one.
+        self.hs.get_datastores().main.get_room_version_id.invalidate((room_id,))
+
+        # For old unknown room versions we won't have an entry in this table
+        # (due to us skipping unknown room versions in the background update).
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                desc="delete_sliding_room",
+            )
+        )
+
+        # Also invalidate some caches to ensure we pull things from the DB.
+        self.store._events_stream_cache._entity_to_key.pop(room_id)
+        self.store._get_max_event_pos.invalidate((room_id,))
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 5,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py
new file mode 100644
index 0000000000..7da51d4954
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py
@@ -0,0 +1,1125 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+
+from parameterized import parameterized, parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes, Membership
+from synapse.handlers.sliding_sync import StateValues
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+from tests.test_utils.event_injection import mark_event_as_partial_state
+
+logger = logging.getLogger(__name__)
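+
+# Note on the sentinel values used in `required_state` throughout these tests: it is
+# assumed (per MSC3575) that `StateValues.WILDCARD` is `"*"`, `StateValues.LAZY` is
+# `"$LAZY"` (lazy-load room members based on the timeline senders) and
+# `StateValues.ME` is `"$ME"` (substituted with the requesting user's own user ID).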
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
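+#
+# The `class_name_func` below is expected to generate two concrete test classes:
+# `SlidingSyncRoomsRequiredStateTestCase_new` and
+# `SlidingSyncRoomsRequiredStateTestCase_fallback`.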
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
+    """
+    Test `rooms.required_state` in the Sliding Sync API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+        super().prepare(reactor, clock, hs)
+
+    def test_rooms_no_required_state(self) -> None:
+        """
+        Empty `rooms.required_state` should not return any state events in the room
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    # Empty `required_state`
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # No `required_state` in response
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("required_state"),
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_required_state_initial_sync(self) -> None:
+        """
+        Test `rooms.required_state` returns requested state events in the room during an
+        initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Tombstone, ""],
+                    ],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.RoomHistoryVisibility, "")],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_incremental_sync(self) -> None:
+        """
+        Test `rooms.required_state` returns requested state events in the room during an
+        incremental sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Tombstone, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We only return state updates, and only if we've already sent the room down
+        # the connection before.
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_incremental_sync_restart(self) -> None:
+        """
+        Test that after a restart (and so the in-memory caches are reset) we
+        correctly return an `M_UNKNOWN_POS` error.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.RoomHistoryVisibility, ""],
+                        # This one doesn't exist in the room
+                        [EventTypes.Tombstone, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Reset the positions
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_connections",
+                keyvalues={"user_id": user1_id},
+                desc="clear_sliding_sync_connections_cache",
+            )
+        )
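+        # Deleting the per-connection rows is assumed to mimic a restart: the sliding
+        # sync connection behind `from_token` is no longer known to the server, so the
+        # request below should be rejected with `M_UNKNOWN_POS`.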
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            method="POST",
+            path=self.sync_endpoint + f"?pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 400, channel.json_body)
+        self.assertEqual(
+            channel.json_body["errcode"], "M_UNKNOWN_POS", channel.json_body
+        )
+
+    def test_rooms_required_state_wildcard(self) -> None:
+        """
+        Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="namespaced",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [StateValues.WILDCARD, StateValues.WILDCARD],
+                    ],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            # We should see all the state events in the room
+            state_map.values(),
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_wildcard_event_type(self) -> None:
+        """
+        Test `rooms.required_state` returns relevant state events when using wildcard in
+        the event_type `["*", "foobarbaz"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key=user2_id,
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with wildcards for the `event_type`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [StateValues.WILDCARD, user2_id],
+                    ],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # We expect at least the state events with `user2_id` as the `state_key`
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[("org.matrix.foo_state", user2_id)],
+            },
+            # Ideally, this would be exact but we're currently returning all state
+            # events when the `event_type` is a wildcard.
+            exact=False,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_wildcard_state_key(self) -> None:
+        """
+        Test `rooms.required_state` returns relevant state events when using wildcard in
+        the state_key `["foobarbaz","*"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request with wildcards for the `state_key`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Member, StateValues.WILDCARD],
+                    ],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_lazy_loading_room_members(self) -> None:
+        """
+        Test `rooms.required_state` returns people relevant to the timeline when
+        lazy-loading room members, `["m.room.member","$LAZY"]`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+
+        self.helper.send(room_id1, "1", tok=user2_tok)
+        self.helper.send(room_id1, "2", tok=user3_tok)
+        self.helper.send(room_id1, "3", tok=user2_tok)
+
+        # Make the Sliding Sync request with lazy loading for the room members
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.Member, StateValues.LAZY],
+                    ],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Only user2 and user3 sent events in the 3 events we see in the `timeline`
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[(EventTypes.Member, user3_id)],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_me(self) -> None:
+        """
+        Test `rooms.required_state` correctly handles $ME.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "1", tok=user2_tok)
+
+        # Also send normal state events with state keys of the users, first
+        # change the power levels to allow this.
+        self.helper.send_state(
+            room_id1,
+            event_type=EventTypes.PowerLevels,
+            body={"users": {user1_id: 50, user2_id: 100}},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user1_id,
+            body={},
+            tok=user1_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo",
+            state_key=user2_id,
+            body={},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with a request for '$ME'.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.Member, StateValues.ME],
+                        ["org.matrix.foo", StateValues.ME],
+                    ],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # We should see the create event plus the state events whose state_key is the
+        # requesting user (`$ME` -> user1)
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[("org.matrix.foo", user1_id)],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
+    def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
+        """
+        Test `rooms.required_state` should not return state past a leave/ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.Member, "*"],
+                        ["org.matrix.foo_state", ""],
+                    ],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+
+        if stop_membership == Membership.LEAVE:
+            # User 1 leaves
+            self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        elif stop_membership == Membership.BAN:
+            # User 1 is banned
+            self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Change the state after user 1 leaves/is banned
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "qux"},
+            tok=user2_tok,
+        )
+        self.helper.leave(room_id1, user3_id, tok=user3_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should only see state from before user1 left/was banned; the later
+        # `org.matrix.foo_state` change and user3's leave shouldn't be visible
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[(EventTypes.Member, user3_id)],
+                state_map[("org.matrix.foo_state", "")],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_combine_superset(self) -> None:
+        """
+        Test `rooms.required_state` is combined across lists and room subscriptions.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.foo_state",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        self.helper.send_state(
+            room_id1,
+            event_type="org.matrix.bar_state",
+            state_key="",
+            body={"bar": "qux"},
+            tok=user2_tok,
+        )
+
+        # Make the Sliding Sync request with overlapping `required_state` across two
+        # lists and a room subscription
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        [EventTypes.Member, user1_id],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "bar-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Member, StateValues.WILDCARD],
+                        ["org.matrix.foo_state", ""],
+                    ],
+                    "timeline_limit": 0,
+                },
+            },
+            "room_subscriptions": {
+                room_id1: {
+                    "required_state": [["org.matrix.bar_state", ""]],
+                    "timeline_limit": 0,
+                }
+            },
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+                state_map[(EventTypes.Member, user1_id)],
+                state_map[(EventTypes.Member, user2_id)],
+                state_map[("org.matrix.foo_state", "")],
+                state_map[("org.matrix.bar_state", "")],
+            },
+            exact=True,
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
+
+    def test_rooms_required_state_partial_state(self) -> None:
+        """
+        Test that partially-stated rooms are excluded if they require full state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        # Mark room2 as partial state
+        self.get_success(
+            mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2)
+        )
+
+        # Make the Sliding Sync request with examples where `must_await_full_state()` is
+        # `False`
+        sync_body = {
+            "lists": {
+                "no-state-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+                "other-state-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "lazy-load-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                        # Lazy-load room members
+                        [EventTypes.Member, StateValues.LAZY],
+                        # Local member
+                        [EventTypes.Member, user2_id],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "local-members-only-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Own user ID
+                        [EventTypes.Member, user1_id],
+                        # Local member
+                        [EventTypes.Member, user2_id],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "me-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Own user ID
+                        [EventTypes.Member, StateValues.ME],
+                        # Local member
+                        [EventTypes.Member, user2_id],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "wildcard-type-local-state-key-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        ["*", user1_id],
+                        # Not a user ID
+                        ["*", "foobarbaz"],
+                        # Not a user ID
+                        ["*", "foo.bar.baz"],
+                        # Not a user ID
+                        ["*", "@foo"],
+                    ],
+                    "timeline_limit": 0,
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # The list should include both rooms now because we don't need full state
+        for list_key in response_body["lists"].keys():
+            self.assertIncludes(
+                set(response_body["lists"][list_key]["ops"][0]["room_ids"]),
+                {room_id2, room_id1},
+                exact=True,
+                message=f"Expected all rooms to show up for list_key={list_key}. Response "
+                + str(response_body["lists"][list_key]),
+            )
+
+        # Take each of the list variants and apply them to room subscriptions to make
+        # sure the same rules apply
+        for list_key in sync_body["lists"].keys():
+            sync_body_for_subscriptions = {
+                "room_subscriptions": {
+                    room_id1: {
+                        "required_state": sync_body["lists"][list_key][
+                            "required_state"
+                        ],
+                        "timeline_limit": 0,
+                    },
+                    room_id2: {
+                        "required_state": sync_body["lists"][list_key][
+                            "required_state"
+                        ],
+                        "timeline_limit": 0,
+                    },
+                }
+            }
+            response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok)
+
+            self.assertIncludes(
+                set(response_body["rooms"].keys()),
+                {room_id2, room_id1},
+                exact=True,
+                message=f"Expected all rooms to show up for test_key={list_key}.",
+            )
+
+        # =====================================================================
+
+        # Make the Sliding Sync request with examples where `must_await_full_state()` is
+        # `True`
+        sync_body = {
+            "lists": {
+                "wildcard-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        ["*", "*"],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "wildcard-type-remote-state-key-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        ["*", "@some:remote"],
+                        # Not a user ID
+                        ["*", "foobarbaz"],
+                        # Not a user ID
+                        ["*", "foo.bar.baz"],
+                        # Not a user ID
+                        ["*", "@foo"],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "remote-member-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Own user ID
+                        [EventTypes.Member, user1_id],
+                        # Remote member
+                        [EventTypes.Member, "@some:remote"],
+                        # Local member
+                        [EventTypes.Member, user2_id],
+                    ],
+                    "timeline_limit": 0,
+                },
+                "lazy-but-remote-member-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Lazy-load room members
+                        [EventTypes.Member, StateValues.LAZY],
+                        # Remote member
+                        [EventTypes.Member, "@some:remote"],
+                    ],
+                    "timeline_limit": 0,
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure the list includes room1 but room2 is excluded because it's still
+        # partially-stated
+        for list_key in response_body["lists"].keys():
+            self.assertIncludes(
+                set(response_body["lists"][list_key]["ops"][0]["room_ids"]),
+                {room_id1},
+                exact=True,
+                message=f"Expected only fully-stated rooms to show up for list_key={list_key}. Response "
+                + str(response_body["lists"][list_key]),
+            )
+
+        # Take each of the list variants and apply them to room subscriptions to make
+        # sure the same rules apply
+        for list_key in sync_body["lists"].keys():
+            sync_body_for_subscriptions = {
+                "room_subscriptions": {
+                    room_id1: {
+                        "required_state": sync_body["lists"][list_key][
+                            "required_state"
+                        ],
+                        "timeline_limit": 0,
+                    },
+                    room_id2: {
+                        "required_state": sync_body["lists"][list_key][
+                            "required_state"
+                        ],
+                        "timeline_limit": 0,
+                    },
+                }
+            }
+            response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok)
+
+            self.assertIncludes(
+                set(response_body["rooms"].keys()),
+                {room_id1},
+                exact=True,
+                message=f"Expected only fully-stated rooms to show up for test_key={list_key}.",
+            )
+
+    def test_rooms_required_state_expand(self) -> None:
+        """Test that when we expand the required state argument we get the
+        expanded state, and not just the changes to the new expanded."""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room with a room name.
+        room_id1 = self.helper.create_room_as(
+            user1_id, tok=user1_tok, extra_content={"name": "Foo"}
+        )
+
+        # Only request the create event to begin with
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+            },
+            exact=True,
+        )
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to include the room name
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+            [EventTypes.Name, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should see the room name, even though there haven't been any
+        # changes.
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Name, "")],
+            },
+            exact=True,
+        )
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # We should not see any state changes.
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+    def test_rooms_required_state_expand_retract_expand(self) -> None:
+        """Test that when expanding, retracting and then expanding the required
+        state, we get the changes that happened."""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room with a room name.
+        room_id1 = self.helper.create_room_as(
+            user1_id, tok=user1_tok, extra_content={"name": "Foo"}
+        )
+
+        # Only request the create event to begin with
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+            },
+            exact=True,
+        )
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to include the room name
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+            [EventTypes.Name, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should see the room name, even though there haven't been any
+        # changes.
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Name, "")],
+            },
+            exact=True,
+        )
+
+        # Update the room name
+        self.helper.send_state(
+            room_id1, "m.room.name", {"name": "Bar"}, state_key="", tok=user1_tok
+        )
+
+        # Update the sliding sync requests to exclude the room name again
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should not see the updated room name in state (though it will be in
+        # the timeline).
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to include the room name again
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+            [EventTypes.Name, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should see the *new* room name, even though it hasn't changed since the
+        # last request (it changed while it wasn't being requested).
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Name, "")],
+            },
+            exact=True,
+        )
+
+    def test_rooms_required_state_expand_deduplicate(self) -> None:
+        """Test that when expanding, retracting and then expanding the required
+        state, we don't get the state down again if it hasn't changed"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a room with a room name.
+        room_id1 = self.helper.create_room_as(
+            user1_id, tok=user1_tok, extra_content={"name": "Foo"}
+        )
+
+        # Only request the create event to begin with
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        [EventTypes.Create, ""],
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Create, "")],
+            },
+            exact=True,
+        )
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to include the room name
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+            [EventTypes.Name, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should see the room name, even though there haven't been any
+        # changes.
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            {
+                state_map[(EventTypes.Name, "")],
+            },
+            exact=True,
+        )
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to exclude the room name again
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should not see any state updates
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+        # Send a message so the room comes down sync.
+        self.helper.send(room_id1, "msg", tok=user1_tok)
+
+        # Update the sliding sync requests to include the room name again
+        sync_body["lists"]["foo-list"]["required_state"] = [
+            [EventTypes.Create, ""],
+            [EventTypes.Name, ""],
+        ]
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # We should not see the room name again, as we have already sent that
+        # down.
+        self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py
new file mode 100644
index 0000000000..2293994793
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py
@@ -0,0 +1,719 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+from typing import List, Optional
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes
+from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
+from synapse.types import StrSequence
+from synapse.util import Clock
+
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
+    """
+    Test `rooms.timeline` in the Sliding Sync API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+
+        super().prepare(reactor, clock, hs)
+
+    def _assertListEqual(
+        self,
+        actual_items: StrSequence,
+        expected_items: StrSequence,
+        message: Optional[str] = None,
+    ) -> None:
+        """
+        Like `self.assertListEqual(...)` but with an actually understandable diff message.
+        """
+
+        if actual_items == expected_items:
+            return
+
+        expected_lines: List[str] = []
+        for expected_item in expected_items:
+            is_expected_in_actual = expected_item in actual_items
+            expected_lines.append(
+                "{}  {}".format(" " if is_expected_in_actual else "?", expected_item)
+            )
+
+        actual_lines: List[str] = []
+        for actual_item in actual_items:
+            is_actual_in_expected = actual_item in expected_items
+            actual_lines.append(
+                "{}  {}".format("+" if is_actual_in_expected else " ", actual_item)
+            )
+
+        newline = "\n"
+        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n [\n{newline.join(expected_lines)}\n ]"
+        actual_string = f"Actual ('+' = found expected items):\n [\n{newline.join(actual_lines)}\n ]"
+        first_message = "Items must"
+        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
+
+        self.fail(f"{diff_message}\n{message}")
+
+    def _assertTimelineEqual(
+        self,
+        *,
+        room_id: str,
+        actual_event_ids: List[str],
+        expected_event_ids: List[str],
+        message: Optional[str] = None,
+    ) -> None:
+        """
+        Like `self.assertListEqual(...)` for event IDs in a room but will give a nicer
+        output with context for what each event_id is (type, stream_ordering, content,
+        etc).
+        """
+        if actual_event_ids == expected_event_ids:
+            return
+
+        event_id_set = set(actual_event_ids + expected_event_ids)
+        events = self.get_success(self.store.get_events(event_id_set))
+
+        def event_id_to_string(event_id: str) -> str:
+            event = events.get(event_id)
+            if event:
+                state_key = event.get_state_key()
+                state_key_piece = f", {state_key}" if state_key is not None else ""
+                return (
+                    f"({event.internal_metadata.stream_ordering: >2}, {event.internal_metadata.instance_name}) "
+                    + f"{event.event_id} ({event.type}{state_key_piece}) {event.content.get('membership', '')}{event.content.get('body', '')}"
+                )
+
+            return f"{event_id} <event not found in room_id={room_id}>"
+
+        self._assertListEqual(
+            actual_items=[
+                event_id_to_string(event_id) for event_id in actual_event_ids
+            ],
+            expected_items=[
+                event_id_to_string(event_id) for event_id in expected_event_ids
+            ],
+            message=message,
+        )
+
+    def test_rooms_limited_initial_sync(self) -> None:
+        """
+        Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
+        on initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        event_response1 = self.helper.send(room_id1, "activity1", tok=user2_tok)
+        event_response2 = self.helper.send(room_id1, "activity2", tok=user2_tok)
+        event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
+        event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            response_body["rooms"][room_id1],
+        )
+        # Check to make sure the latest events are returned
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            expected_event_ids=[
+                event_response4["event_id"],
+                event_response5["event_id"],
+                user1_join_response["event_id"],
+            ],
+            message=str(response_body["rooms"][room_id1]["timeline"]),
+        )
+
+        # Check to make sure the `prev_batch` points at the right place
+        prev_batch_token = response_body["rooms"][room_id1]["prev_batch"]
+
+        # If we use the `prev_batch` token to look backwards we should see
+        # `event3` and older next.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{room_id1}/messages?from={prev_batch_token}&dir=b&limit=3",
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        self.assertListEqual(
+            [
+                event_response3["event_id"],
+                event_response2["event_id"],
+                event_response1["event_id"],
+            ],
+            [ev["event_id"] for ev in channel.json_body["chunk"]],
+        )
+
+        # With no `from_token` (initial sync), it's all historical since there is no
+        # "live" range
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_not_limited_initial_sync(self) -> None:
+        """
+        Test that we mark `rooms` as `limited=False` when there are no more events to
+        paginate to.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity1", tok=user2_tok)
+        self.helper.send(room_id1, "activity2", tok=user2_tok)
+        self.helper.send(room_id1, "activity3", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        timeline_limit = 100
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": timeline_limit,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # The timeline should be `limited=False` because we have all of the events (no
+        # more to paginate to)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            False,
+            response_body["rooms"][room_id1],
+        )
+        expected_number_of_events = 9
+        # We're just looking to make sure we got all of the events before hitting the `timeline_limit`
+        self.assertEqual(
+            len(response_body["rooms"][room_id1]["timeline"]),
+            expected_number_of_events,
+            response_body["rooms"][room_id1]["timeline"],
+        )
+        self.assertLessEqual(expected_number_of_events, timeline_limit)
+
+        # With no `from_token` (initial sync), it's all historical since there is no
+        # "live" token range.
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_incremental_sync(self) -> None:
+        """
+        Test `rooms` data during an incremental sync after an initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)
+
+        # Make an initial Sliding Sync request to grab a token. This is also a sanity
+        # check that we can go from initial to incremental sync.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Send some events but don't send enough to saturate the `timeline_limit`.
+        # We want to later test that we only get the new events since the `next_pos`
+        event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+        # Make an incremental Sliding Sync request (what we're trying to test)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We only expect to see the new events since the last sync which isn't enough to
+        # fill up the `timeline_limit`.
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            False,
+            f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
+            + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+            + str(response_body["rooms"][room_id1]),
+        )
+        # Check to make sure the latest events are returned
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            expected_event_ids=[
+                event_response2["event_id"],
+                event_response3["event_id"],
+            ],
+            message=str(response_body["rooms"][room_id1]["timeline"]),
+        )
+
+        # All events are "live"
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            2,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_newly_joined_incremental_sync(self) -> None:
+        """
+        Test that when we make an incremental sync with a `newly_joined` room, we are
+        able to see some historical events before the `from_token`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before token1", tok=user2_tok)
+        event_response2 = self.helper.send(
+            room_id1, "activity before token2", tok=user2_tok
+        )
+
+        # The `timeline_limit` is set to 4 so we can at least see one historical event
+        # before the `from_token`. We should see historical events because this is a
+        # `newly_joined` room.
+        timeline_limit = 4
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": timeline_limit,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Join the room after the `from_token` which will make us consider this room as
+        # `newly_joined`.
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send some events but don't send enough to saturate the `timeline_limit`.
+        # We want to later test that we only get the new events since the `next_pos`
+        event_response3 = self.helper.send(
+            room_id1, "activity after token3", tok=user2_tok
+        )
+        event_response4 = self.helper.send(
+            room_id1, "activity after token4", tok=user2_tok
+        )
+
+        # Make an incremental Sliding Sync request (what we're trying to test)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should see the new events and the rest should be filled with historical
+        # events which will make us `limited=True` since there are more to paginate to.
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            f"Our `timeline_limit` was {timeline_limit} "
+            + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+            + str(response_body["rooms"][room_id1]),
+        )
+        # Check to make sure that the "live" and historical events are returned
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            expected_event_ids=[
+                event_response2["event_id"],
+                user1_join_response["event_id"],
+                event_response3["event_id"],
+                event_response4["event_id"],
+            ],
+            message=str(response_body["rooms"][room_id1]["timeline"]),
+        )
+
+        # Only events after the `from_token` are "live" (join, event3, event4)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            3,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_initial_sync(self) -> None:
+        """
+        Test that `rooms` we are banned from in an initial sync only allow us to see
+        timeline events up to the ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 3,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # We should see events before the ban but not after
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            expected_event_ids=[
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            message=str(response_body["rooms"][room_id1]["timeline"]),
+        )
+        # No "live" events in an initial sync (no `from_token` to define the "live"
+        # range)
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            0,
+            response_body["rooms"][room_id1],
+        )
+        # There are more events to paginate to
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            True,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync1(self) -> None:
+        """
+        Test that `rooms` we are banned from during the next incremental sync only
+        allow us to see timeline events up to the ban event.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.send(room_id1, "activity before2", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
+        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
+        # The ban is within the token range (between the `from_token` and the sliding
+        # sync request)
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        self.helper.send(room_id1, "activity after5", tok=user2_tok)
+        self.helper.send(room_id1, "activity after6", tok=user2_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # We should see events before the ban but not after
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[
+                event["event_id"]
+                for event in response_body["rooms"][room_id1]["timeline"]
+            ],
+            expected_event_ids=[
+                event_response3["event_id"],
+                event_response4["event_id"],
+                user1_ban_response["event_id"],
+            ],
+            message=str(response_body["rooms"][room_id1]["timeline"]),
+        )
+        # All live events in the incremental sync
+        self.assertEqual(
+            response_body["rooms"][room_id1]["num_live"],
+            3,
+            response_body["rooms"][room_id1],
+        )
+        # There aren't any more events to paginate to in this range
+        self.assertEqual(
+            response_body["rooms"][room_id1]["limited"],
+            False,
+            response_body["rooms"][room_id1],
+        )
+
+    def test_rooms_ban_incremental_sync2(self) -> None:
+        """
+        Test that `rooms` we are banned from before the incremental sync don't return
+        any events in the timeline.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.send(room_id1, "activity before1", tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after2", tok=user2_tok)
+        # The ban is before we get our `from_token`
+        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        self.helper.send(room_id1, "activity after3", tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 4,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        self.helper.send(room_id1, "activity after4", tok=user2_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Nothing to see for this banned user in the room in the token range
+        self.assertIsNone(response_body["rooms"].get(room_id1))
+
+    def test_increasing_timeline_range_sends_more_messages(self) -> None:
+        """
+        Test that increasing the timeline limit via room subscriptions sends the
+        room down with more messages in a limited sync.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [[EventTypes.Create, ""]],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+
+        message_events = []
+        for _ in range(10):
+            resp = self.helper.send(room_id1, "msg", tok=user1_tok)
+            message_events.append(resp["event_id"])
+
+        # Make the first Sliding Sync request
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        room_response = response_body["rooms"][room_id1]
+
+        self.assertEqual(room_response["initial"], True)
+        self.assertNotIn("unstable_expanded_timeline", room_response)
+        self.assertEqual(room_response["limited"], True)
+
+        # We only expect the last message at first
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[event["event_id"] for event in room_response["timeline"]],
+            expected_event_ids=message_events[-1:],
+            message=str(room_response["timeline"]),
+        )
+
+        # We also expect to get the create event state.
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+        self._assertRequiredStateIncludes(
+            room_response["required_state"],
+            {state_map[(EventTypes.Create, "")]},
+            exact=True,
+        )
+
+        # Now do another request with a room subscription with an increased timeline limit
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 10,
+            }
+        }
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        room_response = response_body["rooms"][room_id1]
+
+        self.assertNotIn("initial", room_response)
+        self.assertEqual(room_response["unstable_expanded_timeline"], True)
+        self.assertEqual(room_response["limited"], True)
+
+        # Now we expect all the messages
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[event["event_id"] for event in room_response["timeline"]],
+            expected_event_ids=message_events,
+            message=str(room_response["timeline"]),
+        )
+
+        # We don't expect to get the room create down, as nothing has changed.
+        self.assertNotIn("required_state", room_response)
+
+        # Decreasing the timeline limit shouldn't resend any events
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 5,
+            }
+        }
+
+        event_response = self.helper.send(room_id1, "msg", tok=user1_tok)
+        latest_event_id = event_response["event_id"]
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        room_response = response_body["rooms"][room_id1]
+
+        self.assertNotIn("initial", room_response)
+        self.assertNotIn("unstable_expanded_timeline", room_response)
+        self.assertEqual(room_response["limited"], False)
+
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[event["event_id"] for event in room_response["timeline"]],
+            expected_event_ids=[latest_event_id],
+            message=str(room_response["timeline"]),
+        )
+
+        # Increasing the limit to what it was before also should not resend any
+        # events
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 10,
+            }
+        }
+
+        event_response = self.helper.send(room_id1, "msg", tok=user1_tok)
+        latest_event_id = event_response["event_id"]
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+        room_response = response_body["rooms"][room_id1]
+
+        self.assertNotIn("initial", room_response)
+        self.assertNotIn("unstable_expanded_timeline", room_response)
+        self.assertEqual(room_response["limited"], False)
+
+        self._assertTimelineEqual(
+            room_id=room_id1,
+            actual_event_ids=[event["event_id"] for event in room_response["timeline"]],
+            expected_event_ids=[latest_event_id],
+            message=str(room_response["timeline"]),
+        )
diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
new file mode 100644
index 0000000000..ea3ca57957
--- /dev/null
+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
@@ -0,0 +1,1467 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+import logging
+from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple
+from unittest.mock import AsyncMock
+
+from parameterized import parameterized, parameterized_class
+from typing_extensions import assert_never
+
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import (
+    AccountDataTypes,
+    EventContentFields,
+    EventTypes,
+    JoinRules,
+    Membership,
+    RoomTypes,
+)
+from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.handlers.sliding_sync import StateValues
+from synapse.rest.client import account_data, devices, login, receipts, room, sync
+from synapse.server import HomeServer
+from synapse.types import (
+    JsonDict,
+    RoomStreamToken,
+    SlidingSyncStreamToken,
+    StreamKeyType,
+    StreamToken,
+)
+from synapse.util import Clock
+from synapse.util.stringutils import random_string
+
+from tests import unittest
+from tests.server import TimedOutException
+from tests.test_utils.event_injection import create_event
+
+logger = logging.getLogger(__name__)
+
+
+class SlidingSyncBase(unittest.HomeserverTestCase):
+    """Base class for sliding sync test cases"""
+
+    # Flag as to whether to use the new sliding sync tables or not
+    #
+    # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    # foreground update for
+    # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    # https://github.com/element-hq/synapse/issues/17623)
+    use_new_tables: bool = True
+
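+    # This is the unstable "simplified sliding sync" (MSC3575) endpoint. Requests are
+    # POSTs with a JSON body of `lists` and/or `room_subscriptions`; the `?pos=` query
+    # parameter resumes from a previous response's `pos` token and `?timeout=` (in ms)
+    # long-polls for new data. See `do_sync` below.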
+    sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+        # foreground update for
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+        # https://github.com/element-hq/synapse/issues/17623)
+        hs.get_datastores().main.have_finished_sliding_sync_background_jobs = AsyncMock(  # type: ignore[method-assign]
+            return_value=self.use_new_tables
+        )
+
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        # Enable sliding sync
+        config["experimental_features"] = {"msc3575_enabled": True}
+        return config
+
+    def do_sync(
+        self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
+    ) -> Tuple[JsonDict, str]:
+        """Do a sliding sync request with given body.
+
+        Asserts the request was successful.
+
+        Attributes:
+            sync_body: The full request body to use
+            since: Optional since token
+            tok: Access token to use
+
+        Returns:
+            A tuple of the response body and the `pos` field.
+        """
+
+        sync_path = self.sync_endpoint
+        if since:
+            sync_path += f"?pos={since}"
+
+        channel = self.make_request(
+            method="POST",
+            path=sync_path,
+            content=sync_body,
+            access_token=tok,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        return channel.json_body, channel.json_body["pos"]
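+
+    # Illustrative usage of `do_sync` (names and values are examples only):
+    #
+    #     sync_body = {
+    #         "lists": {
+    #             "foo-list": {"ranges": [[0, 99]], "required_state": [], "timeline_limit": 1}
+    #         }
+    #     }
+    #     response_body, pos = self.do_sync(sync_body, tok=access_token)
+    #     # ...and an incremental sync from the returned `pos`:
+    #     response_body, pos = self.do_sync(sync_body, since=pos, tok=access_token)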
+
+    def _assertRequiredStateIncludes(
+        self,
+        actual_required_state: Any,
+        expected_state_events: Iterable[EventBase],
+        exact: bool = False,
+    ) -> None:
+        """
+        Wrapper around `assertIncludes` to give slightly better looking diff error
+        messages that include some context "$event_id (type, state_key)".
+
+        Args:
+            actual_required_state: The "required_state" of a room from a Sliding Sync
+                request response.
+            expected_state_events: The expected state events to be included in the
+                `actual_required_state`.
+            exact: Whether the actual state should be exactly equal to the expected
+                state (no extras).
+        """
+
+        assert isinstance(actual_required_state, list)
+        for event in actual_required_state:
+            assert isinstance(event, dict)
+
+        self.assertIncludes(
+            {
+                f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
+                for event in actual_required_state
+            },
+            {
+                f'{event.event_id} ("{event.type}", "{event.state_key}")'
+                for event in expected_state_events
+            },
+            exact=exact,
+            # Message to help understand the diff in context
+            message=str(actual_required_state),
+        )
+
+    def _add_new_dm_to_global_account_data(
+        self, source_user_id: str, target_user_id: str, target_room_id: str
+    ) -> None:
+        """
+        Helper to handle inserting a new DM for the source user into global account data
+        (handles all of the list merging).
+
+        Args:
+            source_user_id: The user ID of the DM mapping we're going to update
+            target_user_id: User ID of the person the DM is with
+            target_room_id: Room ID of the DM
+        """
+        store = self.hs.get_datastores().main
+
+        # Get the current DM map
+        existing_dm_map = self.get_success(
+            store.get_global_account_data_by_type_for_user(
+                source_user_id, AccountDataTypes.DIRECT
+            )
+        )
+        # Scrutinize the account data since it has no concrete type. We're just copying
+        # everything into a known type. It should be a mapping from user ID to a list of
+        # room IDs. Ignore anything else.
+        new_dm_map: Dict[str, List[str]] = {}
+        if isinstance(existing_dm_map, dict):
+            for user_id, room_ids in existing_dm_map.items():
+                if isinstance(user_id, str) and isinstance(room_ids, list):
+                    for room_id in room_ids:
+                        if isinstance(room_id, str):
+                            new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
+                                room_id
+                            ]
+
+        # Add the new DM to the map
+        new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
+            target_room_id
+        ]
+        # Save the DM map to global account data
+        self.get_success(
+            store.add_account_data_for_user(
+                source_user_id,
+                AccountDataTypes.DIRECT,
+                new_dm_map,
+            )
+        )
+
+    def _create_dm_room(
+        self,
+        inviter_user_id: str,
+        inviter_tok: str,
+        invitee_user_id: str,
+        invitee_tok: str,
+        should_join_room: bool = True,
+    ) -> str:
+        """
+        Helper to create a DM room as the "inviter" and invite the "invitee" user to the
+        room. The "invitee" user also will join the room. The `m.direct` account data
+        will be set for both users.
+        """
+        # Create a room and send an invite to the other user
+        room_id = self.helper.create_room_as(
+            inviter_user_id,
+            is_public=False,
+            tok=inviter_tok,
+        )
+        self.helper.invite(
+            room_id,
+            src=inviter_user_id,
+            targ=invitee_user_id,
+            tok=inviter_tok,
+            extra_data={"is_direct": True},
+        )
+        if should_join_room:
+            # Person that was invited joins the room
+            self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
+
+        # Mimic the client setting the room as a direct message in the global account
+        # data for both users.
+        self._add_new_dm_to_global_account_data(
+            invitee_user_id, inviter_user_id, room_id
+        )
+        self._add_new_dm_to_global_account_data(
+            inviter_user_id, invitee_user_id, room_id
+        )
+
+        return room_id
+
+    _remote_invite_count: int = 0
+
+    def _create_remote_invite_room_for_user(
+        self,
+        invitee_user_id: str,
+        unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
+    ) -> str:
+        """
+        Create a fake invite for a remote room and persist it.
+
+        We don't have any state for these kind of rooms and can only rely on the
+        stripped state included in the unsigned portion of the invite event to identify
+        the room.
+
+        Args:
+            invitee_user_id: The person being invited
+            unsigned_invite_room_state: List of stripped state events to assist the
+                receiver in identifying the room.
+
+        Returns:
+            The room ID of the remote invite room
+        """
+        store = self.hs.get_datastores().main
+
+        invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
+
+        invite_event_dict = {
+            "room_id": invite_room_id,
+            "sender": "@inviter:remote_server",
+            "state_key": invitee_user_id,
+            "depth": 1,
+            "origin_server_ts": 1,
+            "type": EventTypes.Member,
+            "content": {"membership": Membership.INVITE},
+            "auth_events": [],
+            "prev_events": [],
+        }
+        if unsigned_invite_room_state is not None:
+            serialized_stripped_state_events = []
+            for stripped_event in unsigned_invite_room_state:
+                serialized_stripped_state_events.append(
+                    {
+                        "type": stripped_event.type,
+                        "state_key": stripped_event.state_key,
+                        "sender": stripped_event.sender,
+                        "content": stripped_event.content,
+                    }
+                )
+
+            invite_event_dict["unsigned"] = {
+                "invite_room_state": serialized_stripped_state_events
+            }
+
+        invite_event = make_event_from_dict(
+            invite_event_dict,
+            room_version=RoomVersions.V10,
+        )
+        invite_event.internal_metadata.outlier = True
+        invite_event.internal_metadata.out_of_band_membership = True
+
+        self.get_success(
+            store.maybe_store_room_on_outlier_membership(
+                room_id=invite_room_id, room_version=invite_event.room_version
+            )
+        )
+        context = EventContext.for_outlier(self.hs.get_storage_controllers())
+        persist_controller = self.hs.get_storage_controllers().persistence
+        assert persist_controller is not None
+        self.get_success(persist_controller.persist_event(invite_event, context))
+
+        self._remote_invite_count += 1
+
+        return invite_room_id
+
+    def _bump_notifier_wait_for_events(
+        self,
+        user_id: str,
+        wake_stream_key: Literal[
+            StreamKeyType.ACCOUNT_DATA,
+            StreamKeyType.PRESENCE,
+        ],
+    ) -> None:
+        """
+        Wake up a `notifier.wait_for_events(user_id)` call without affecting the Sliding
+        Sync results.
+
+        Args:
+            user_id: The user ID to wake up the notifier for
+            wake_stream_key: The stream key to wake up. This will create an actual new
+                entity in that stream, so it's best to choose one that won't affect the
+                Sliding Sync results you're testing for. In other words, if you're testing
+                account data, choose `StreamKeyType.PRESENCE` instead. We support two
+                possible stream keys because you're probably testing one or the other, so
+                one is always a "safe" option.
+        """
+        # We're expecting some new activity from this point onwards
+        from_token = self.hs.get_event_sources().get_current_token()
+
+        triggered_notifier_wait_for_events = False
+
+        async def _on_new_activity(
+            before_token: StreamToken, after_token: StreamToken
+        ) -> bool:
+            nonlocal triggered_notifier_wait_for_events
+            triggered_notifier_wait_for_events = True
+            return True
+
+        notifier = self.hs.get_notifier()
+
+        # Listen for some new activity for the user. We're just trying to confirm that
+        # our bump below actually does what we think it does (triggers new activity for
+        # the user).
+        result_awaitable = notifier.wait_for_events(
+            user_id,
+            1000,
+            _on_new_activity,
+            from_token=from_token,
+        )
+
+        # Update the account data or presence so that `notifier.wait_for_events(...)`
+        # wakes up. We chose these two options because they're least likely to show up
+        # in the Sliding Sync response so it won't affect whether we have results.
+        if wake_stream_key == StreamKeyType.ACCOUNT_DATA:
+            self.get_success(
+                self.hs.get_account_data_handler().add_account_data_for_user(
+                    user_id,
+                    "org.matrix.foobarbaz",
+                    {"foo": "bar"},
+                )
+            )
+        elif wake_stream_key == StreamKeyType.PRESENCE:
+            sending_user_id = self.register_user(
+                "user_bump_notifier_wait_for_events_" + random_string(10), "pass"
+            )
+            sending_user_tok = self.login(sending_user_id, "pass")
+            test_msg = {"foo": "bar"}
+            chan = self.make_request(
+                "PUT",
+                "/_matrix/client/r0/sendToDevice/m.test/1234",
+                content={"messages": {user_id: {"d1": test_msg}}},
+                access_token=sending_user_tok,
+            )
+            self.assertEqual(chan.code, 200, chan.result)
+        else:
+            assert_never(wake_stream_key)
+
+        # Wait for our notifier result
+        self.get_success(result_awaitable)
+
+        if not triggered_notifier_wait_for_events:
+            raise AssertionError(
+                "Expected `notifier.wait_for_events(...)` to be triggered"
+            )
+
+
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
+class SlidingSyncTestCase(SlidingSyncBase):
+    """
+    Tests regarding MSC3575 Sliding Sync `/sync` endpoint.
+
+    Please put tests in more specific test files if applicable. This test class is meant
+    for generic behavior of the endpoint.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+        devices.register_servlets,
+        receipts.register_servlets,
+        account_data.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.storage_controllers = hs.get_storage_controllers()
+        self.account_data_handler = hs.get_account_data_handler()
+        persistence = self.hs.get_storage_controllers().persistence
+        assert persistence is not None
+        self.persistence = persistence
+
+        super().prepare(reactor, clock, hs)
+
+    def test_sync_list(self) -> None:
+        """
+        Test that room IDs show up in the Sliding Sync `lists`
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+
+        # Make sure the list includes the room we are joined to
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 99],
+                    "room_ids": [room_id],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+    def test_wait_for_sync_token(self) -> None:
+        """
+        Test that the worker will wait until it catches up to the given token
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a future token that will cause us to wait. Since we never send a new
+        # event to reach that future stream_ordering, the worker will wait until the
+        # full timeout.
+        stream_id_gen = self.store.get_events_stream_id_generator()
+        stream_id = self.get_success(stream_id_gen.get_next().__aenter__())
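+        # (We never exit the `get_next()` context manager, so no event is ever
+        # persisted at this stream position and the token below stays in the future.)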
+        current_token = self.event_sources.get_current_token()
+        future_position_token = current_token.copy_and_replace(
+            StreamKeyType.ROOM,
+            RoomStreamToken(stream=stream_id),
+        )
+
+        future_position_token_serialized = self.get_success(
+            SlidingSyncStreamToken(future_position_token, 0).to_string(self.store)
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?pos={future_position_token_serialized}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
+        # time out
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=9900)
+        channel.await_result(timeout_ms=200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # We expect the next `pos` in the result to be the same as the token we
+        # requested with, because we weren't able to find anything new yet.
+        self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
+
+    def test_wait_for_new_data(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive.
+
+        (Only applies to incremental syncs with a `timeout` specified)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are waiting in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Bump the room with new events to trigger new results
+        event_response1 = self.helper.send(
+            room_id, "new activity in room", tok=user1_tok
+        )
+        # Should respond before the 10 second timeout
+        channel.await_result(timeout_ms=3000)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Check to make sure the new event is returned
+        self.assertEqual(
+            [
+                event["event_id"]
+                for event in channel.json_body["rooms"][room_id]["timeline"]
+            ],
+            [
+                event_response1["event_id"],
+            ],
+            channel.json_body["rooms"][room_id]["timeline"],
+        )
+
+    def test_wait_for_new_data_timeout(self) -> None:
+        """
+        Test to make sure that the Sliding Sync request waits for new data to arrive, but
+        no data ever arrives, so we time out. We're also making sure that the default data
+        doesn't trigger a false positive for new data.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        channel = self.make_request(
+            "POST",
+            self.sync_endpoint + f"?timeout=10000&pos={from_token}",
+            content=sync_body,
+            access_token=user1_tok,
+            await_result=False,
+        )
+        # Block for 5 seconds to make sure we are waiting in `notifier.wait_for_events(...)`
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=5000)
+        # Wake up `notifier.wait_for_events(...)`, which will cause us to check
+        # `SlidingSyncResult.__bool__` for new results.
+        self._bump_notifier_wait_for_events(
+            user1_id, wake_stream_key=StreamKeyType.ACCOUNT_DATA
+        )
+        # Block for a little bit more to ensure we don't see any new results.
+        with self.assertRaises(TimedOutException):
+            channel.await_result(timeout_ms=4000)
+        # Wait for the sync to complete (wait for the rest of the 10 second timeout,
+        # 5000 + 4000 + 1200 > 10000)
+        channel.await_result(timeout_ms=1200)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # There should be no room sent down.
+        self.assertFalse(channel.json_body["rooms"])
+
+    def test_forgotten_up_to_date(self) -> None:
+        """
+        Make sure we get up-to-date `forgotten` status for rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 is banned from the room (was never in the room)
+        self.helper.ban(room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {},
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # User1 forgets the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # We should no longer see the forgotten room
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            set(),
+            exact=True,
+        )
+
+    def test_ignored_user_invites_initial_sync(self) -> None:
+        """
+        Make sure we ignore invites if they are from one of the `m.ignored_user_list` on
+        initial sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a room that user1 is already in
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a room that user2 is already in
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 is invited to room_id2
+        self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Sync once before we ignore to make sure the rooms can show up
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # room_id2 shows up because we haven't ignored the user yet
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id1, room_id2},
+            exact=True,
+        )
+
+        # User1 ignores user2
+        channel = self.make_request(
+            "PUT",
+            f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}",
+            content={"ignored_users": {user2_id: {}}},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Sync again (initial sync)
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        # The invite for room_id2 should no longer show up because user2 is ignored
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id1},
+            exact=True,
+        )
+
+    def test_ignored_user_invites_incremental_sync(self) -> None:
+        """
+        Make sure we ignore invites if they are from one of the `m.ignored_user_list` on
+        incremental sync.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a room that user1 is already in
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create a room that user2 is already in
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 ignores user2
+        channel = self.make_request(
+            "PUT",
+            f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}",
+            content={"ignored_users": {user2_id: {}}},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Initial sync
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # User1 only has membership in room_id1 at this point
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id1},
+            exact=True,
+        )
+
+        # User1 is invited to room_id2 after the initial sync
+        self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # Sync again (incremental sync)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+        # The invite for room_id2 doesn't show up because user2 is ignored
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id1},
+            exact=True,
+        )
+
+    def test_sort_list(self) -> None:
+        """
+        Test that the `lists` are sorted by `stream_ordering`
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+        # Activity that will order the rooms
+        self.helper.send(room_id3, "activity in room3", tok=user1_tok)
+        self.helper.send(room_id1, "activity in room1", tok=user1_tok)
+        self.helper.send(room_id2, "activity in room2", tok=user1_tok)
+
+        # Make the Sliding Sync request where the range includes *some* of the rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertIncludes(
+            response_body["lists"].keys(),
+            {"foo-list"},
+        )
+        # Make sure the list is sorted in the way we expect (we only sort when the range
+        # doesn't include all of the rooms)
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id2, room_id1],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+        # Make the Sliding Sync request where the range includes *all* of the rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertIncludes(
+            response_body["lists"].keys(),
+            {"foo-list"},
+        )
+        # Since the range includes all of the rooms, we don't sort the list
+        self.assertEqual(
+            len(response_body["lists"]["foo-list"]["ops"]),
+            1,
+            response_body["lists"]["foo-list"],
+        )
+        op = response_body["lists"]["foo-list"]["ops"][0]
+        self.assertEqual(op["op"], "SYNC")
+        self.assertEqual(op["range"], [0, 99])
+        # Note that we don't sort the rooms when the range includes all of the rooms, so
+        # we just assert that the rooms are included
+        self.assertIncludes(
+            set(op["room_ids"]), {room_id1, room_id2, room_id3}, exact=True
+        )
+
+    def test_sliced_windows(self) -> None:
+        """
+        Test that the `lists` `ranges` are sliced correctly. Both sides of each range
+        are inclusive.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+
+        # Make the Sliding Sync request for a single room
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 0],
+                    "room_ids": [room_id3],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+        # Make the Sliding Sync request for the first two rooms
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make sure it has the foo-list we requested
+        self.assertListEqual(
+            list(response_body["lists"].keys()),
+            ["foo-list"],
+            response_body["lists"].keys(),
+        )
+        # Make sure the list is sorted in the way we expect
+        self.assertListEqual(
+            list(response_body["lists"]["foo-list"]["ops"]),
+            [
+                {
+                    "op": "SYNC",
+                    "range": [0, 1],
+                    "room_ids": [room_id3, room_id2],
+                }
+            ],
+            response_body["lists"]["foo-list"],
+        )
+
+    def test_rooms_with_no_updates_do_not_come_down_incremental_sync(self) -> None:
+        """
+        Test that rooms with no updates are not returned in subsequent incremental
+        syncs.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+
+        _, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Make the incremental Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Nothing has happened in the room, so the room should not come down
+        # /sync.
+        self.assertIsNone(response_body["rooms"].get(room_id1))
+
+    def test_empty_initial_room_comes_down_sync(self) -> None:
+        """
+        Test that rooms come down /sync even with empty required state and a
+        timeline limit of zero on initial sync.
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+
+        # Make the Sliding Sync request
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+
+    def test_state_reset_room_comes_down_incremental_sync(self) -> None:
+        """Test that a room that we were state reset out of comes down
+        incremental sync"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            is_public=True,
+            tok=user2_tok,
+            extra_content={
+                "name": "my super room",
+            },
+        )
+
+        # Create an event for us to point back to for the state reset
+        event_response = self.helper.send(room_id1, "test", tok=user2_tok)
+        event_id = event_response["event_id"]
+
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Request all state just to see what we get back when we are
+                        # state reset out of the room
+                        [StateValues.WILDCARD, StateValues.WILDCARD]
+                    ],
+                    "timeline_limit": 1,
+                }
+            }
+        }
+
+        # Make the Sliding Sync request
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Make sure we see room1
+        self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True)
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+
+        # Trigger a state reset
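+        # (The new join-rules event deliberately points its `prev_event_ids` at an
+        # event from before user1 joined; once it is persisted, the room's current
+        # state no longer includes user1's membership, as asserted below.)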
+        join_rule_event, join_rule_context = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[event_id],
+                type=EventTypes.JoinRules,
+                state_key="",
+                content={"join_rule": JoinRules.INVITE},
+                sender=user2_id,
+                room_id=room_id1,
+                room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+            )
+        )
+        _, join_rule_event_pos, _ = self.get_success(
+            self.persistence.persist_event(join_rule_event, join_rule_context)
+        )
+
+        # FIXME: We're manually busting the cache since
+        # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, join_rule_event_pos.stream
+        )
+
+        # Ensure that the state reset worked and only user2 is in the room now
+        users_in_room = self.get_success(self.store.get_users_in_room(room_id1))
+        self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+        state_map_at_reset = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Update the state after user1 was state reset out of the room
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {EventContentFields.ROOM_NAME: "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # Make another Sliding Sync request (incremental)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Expect to see room1 because it is `newly_left` thanks to being state reset out
+        # of it since the last time we synced. We need to let the client know that
+        # something happened and that they are no longer in the room.
+        self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True)
+        # We set `initial=True` to indicate that the client should reset the state they
+        # have about the room
+        self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+        # They shouldn't see anything past the state reset
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][room_id1]["required_state"],
+            # We should see all the state events in the room
+            state_map_at_reset.values(),
+            exact=True,
+        )
+        # The position where the state reset happened
+        self.assertEqual(
+            response_body["rooms"][room_id1]["bump_stamp"],
+            join_rule_event_pos.stream,
+            response_body["rooms"][room_id1],
+        )
+
+        # Other non-important things. We just want to check what these are so we know
+        # what happens in a state reset scenario.
+        #
+        # Room name was set at the time of the state reset so we should still be able to
+        # see it.
+        self.assertEqual(response_body["rooms"][room_id1]["name"], "my super room")
+        # Could be set but there is no avatar for this room
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("avatar"),
+            response_body["rooms"][room_id1],
+        )
+        # Could be set but this room isn't marked as a DM
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("is_dm"),
+            response_body["rooms"][room_id1],
+        )
+        # Empty timeline because we are not in the room at all (the events are all
+        # being filtered out)
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("timeline"),
+            response_body["rooms"][room_id1],
+        )
+        # `limited` since we're not providing any timeline events but there are some in
+        # the room.
+        self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
+        # User is no longer in the room so they can't see this info
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("joined_count"),
+            response_body["rooms"][room_id1],
+        )
+        self.assertIsNone(
+            response_body["rooms"][room_id1].get("invited_count"),
+            response_body["rooms"][room_id1],
+        )
+
+    def test_state_reset_previously_room_comes_down_incremental_sync_with_filters(
+        self,
+    ) -> None:
+        """
+        Test that a room that we were state reset out of should always be sent down
+        regardless of the filters if it has been sent down the connection before.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+                "name": "my super space",
+            },
+        )
+
+        # Create an event for us to point back to for the state reset
+        event_response = self.helper.send(space_room_id, "test", tok=user2_tok)
+        event_id = event_response["event_id"]
+
+        self.helper.join(space_room_id, user1_id, tok=user1_tok)
+
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Request all state just to see what we get back when we are
+                        # state reset out of the room
+                        [StateValues.WILDCARD, StateValues.WILDCARD]
+                    ],
+                    "timeline_limit": 1,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                }
+            }
+        }
+
+        # Make the Sliding Sync request
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Make sure we see the space room
+        self.assertIncludes(
+            set(response_body["rooms"].keys()), {space_room_id}, exact=True
+        )
+        self.assertEqual(response_body["rooms"][space_room_id]["initial"], True)
+
+        # Trigger a state reset
+        join_rule_event, join_rule_context = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[event_id],
+                type=EventTypes.JoinRules,
+                state_key="",
+                content={"join_rule": JoinRules.INVITE},
+                sender=user2_id,
+                room_id=space_room_id,
+                room_version=self.get_success(
+                    self.store.get_room_version_id(space_room_id)
+                ),
+            )
+        )
+        _, join_rule_event_pos, _ = self.get_success(
+            self.persistence.persist_event(join_rule_event, join_rule_context)
+        )
+
+        # FIXME: We're manually busting the cache since
+        # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, join_rule_event_pos.stream
+        )
+
+        # Ensure that the state reset worked and only user2 is in the room now
+        users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
+        self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+        state_map_at_reset = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+
+        # Update the state after user1 was state reset out of the room
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {EventContentFields.ROOM_NAME: "my super duper space"},
+            tok=user2_tok,
+        )
+
+        # User2 also leaves the room so the server is no longer participating in the room
+        # and we don't have access to current state
+        self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+
+        # Make another Sliding Sync request (incremental)
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Expect to see the space room because it is `newly_left` thanks to being state
+        # reset out of it since the last time we synced. We need to let the client know
+        # that something happened and that they are no longer in the room.
+        self.assertIncludes(
+            set(response_body["rooms"].keys()), {space_room_id}, exact=True
+        )
+        # We set `initial=True` to indicate that the client should reset the state they
+        # have about the room
+        self.assertEqual(response_body["rooms"][space_room_id]["initial"], True)
+        # They shouldn't see anything past the state reset
+        self._assertRequiredStateIncludes(
+            response_body["rooms"][space_room_id]["required_state"],
+            # We should see all the state events in the room
+            state_map_at_reset.values(),
+            exact=True,
+        )
+        # The position where the state reset happened
+        self.assertEqual(
+            response_body["rooms"][space_room_id]["bump_stamp"],
+            join_rule_event_pos.stream,
+            response_body["rooms"][space_room_id],
+        )
+
+        # Other non-important things. We just want to check what these are so we know
+        # what happens in a state reset scenario.
+        #
+        # Room name was set at the time of the state reset so we should still be able to
+        # see it.
+        self.assertEqual(
+            response_body["rooms"][space_room_id]["name"], "my super space"
+        )
+        # Could be set but there is no avatar for this room
+        self.assertIsNone(
+            response_body["rooms"][space_room_id].get("avatar"),
+            response_body["rooms"][space_room_id],
+        )
+        # Could be set but this room isn't marked as a DM
+        self.assertIsNone(
+            response_body["rooms"][space_room_id].get("is_dm"),
+            response_body["rooms"][space_room_id],
+        )
+        # Empty timeline because we are not in the room at all (the events are all
+        # being filtered out)
+        self.assertIsNone(
+            response_body["rooms"][space_room_id].get("timeline"),
+            response_body["rooms"][space_room_id],
+        )
+        # `limited` since we're not providing any timeline events but there are some in
+        # the room.
+        self.assertEqual(response_body["rooms"][space_room_id]["limited"], True)
+        # User is no longer in the room so they can't see this info
+        self.assertIsNone(
+            response_body["rooms"][space_room_id].get("joined_count"),
+            response_body["rooms"][space_room_id],
+        )
+        self.assertIsNone(
+            response_body["rooms"][space_room_id].get("invited_count"),
+            response_body["rooms"][space_room_id],
+        )
+
+    @parameterized.expand(
+        [
+            ("server_leaves_room", True),
+            ("server_participating_in_room", False),
+        ]
+    )
+    def test_state_reset_never_room_incremental_sync_with_filters(
+        self, test_description: str, server_leaves_room: bool
+    ) -> None:
+        """
+        Test that a room that we were state reset out of should be sent down if we can
+        figure out the state or if it was sent down the connection before.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create a space room
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+                "name": "my super space",
+            },
+        )
+
+        # Create another space room
+        space_room_id2 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+            },
+        )
+
+        # Create an event for us to point back to for the state reset
+        event_response = self.helper.send(space_room_id, "test", tok=user2_tok)
+        event_id = event_response["event_id"]
+
+        # User1 joins the rooms
+        #
+        self.helper.join(space_room_id, user1_id, tok=user1_tok)
+        # Join space_room_id2 so that it is at the top of the list
+        self.helper.join(space_room_id2, user1_id, tok=user1_tok)
+
+        # Make a SS request for only the top room.
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 0]],
+                    "required_state": [
+                        # Request all state just to see what we get back when we are
+                        # state reset out of the room
+                        [StateValues.WILDCARD, StateValues.WILDCARD]
+                    ],
+                    "timeline_limit": 1,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                }
+            }
+        }
+
+        # Make the Sliding Sync request
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Make sure we only see space_room_id2
+        self.assertIncludes(
+            set(response_body["rooms"].keys()), {space_room_id2}, exact=True
+        )
+        self.assertEqual(response_body["rooms"][space_room_id2]["initial"], True)
+
+        # Just create some activity in space_room_id2 so it appears when we do an incremental sync again
+        self.helper.send(space_room_id2, "test", tok=user2_tok)
+
+        # Trigger a state reset
+        join_rule_event, join_rule_context = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[event_id],
+                type=EventTypes.JoinRules,
+                state_key="",
+                content={"join_rule": JoinRules.INVITE},
+                sender=user2_id,
+                room_id=space_room_id,
+                room_version=self.get_success(
+                    self.store.get_room_version_id(space_room_id)
+                ),
+            )
+        )
+        _, join_rule_event_pos, _ = self.get_success(
+            self.persistence.persist_event(join_rule_event, join_rule_context)
+        )
+
+        # FIXME: We're manually busting the cache since
+        # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, join_rule_event_pos.stream
+        )
+
+        # Ensure that the state reset worked and only user2 is in the room now
+        users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
+        self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+        # Update the state after user1 was state reset out of the room.
+        # This will also bump it to the top of the list.
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {EventContentFields.ROOM_NAME: "my super duper space"},
+            tok=user2_tok,
+        )
+
+        if server_leaves_room:
+            # User2 also leaves the room so the server is no longer participating in the room
+            # and we don't have access to current state
+            self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+
+        # Make another Sliding Sync request (incremental)
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    # Expand the range to include all rooms
+                    "ranges": [[0, 1]],
+                    "required_state": [
+                        # Request all state just to see what we get back when we are
+                        # state reset out of the room
+                        [StateValues.WILDCARD, StateValues.WILDCARD]
+                    ],
+                    "timeline_limit": 1,
+                    "filters": {
+                        "room_types": [RoomTypes.SPACE],
+                    },
+                }
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        if self.use_new_tables:
+            if server_leaves_room:
+                # We still only expect to see space_room_id2 because even though we were state
+                # reset out of space_room_id, it was never sent down the connection before, so we
+                # don't need to bother the client with it.
+                self.assertIncludes(
+                    set(response_body["rooms"].keys()), {space_room_id2}, exact=True
+                )
+            else:
+                # Both rooms show up because we can figure out the state for the
+                # `filters.room_types` if someone is still in the room (we look at the
+                # current state because `room_type` never changes).
+                self.assertIncludes(
+                    set(response_body["rooms"].keys()),
+                    {space_room_id, space_room_id2},
+                    exact=True,
+                )
+        else:
+            # Both rooms show up because we can actually take the time to figure out the
+            # state for the `filters.room_types` in the fallback path (we look at
+            # historical state for `LEAVE` membership).
+            self.assertIncludes(
+                set(response_body["rooms"].keys()),
+                {space_room_id, space_room_id2},
+                exact=True,
+            )
diff --git a/tests/rest/client/test_auth_issuer.py b/tests/rest/client/test_auth_issuer.py
index 964baeec32..d6f334a7ab 100644
--- a/tests/rest/client/test_auth_issuer.py
+++ b/tests/rest/client/test_auth_issuer.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from http import HTTPStatus
+from unittest.mock import AsyncMock
 
 from synapse.rest.client import auth_issuer
 
@@ -50,10 +51,29 @@ class AuthIssuerTestCase(HomeserverTestCase):
         }
     )
     def test_returns_issuer_when_oidc_enabled(self) -> None:
-        # Make an unauthenticated request for the discovery info.
+        # Patch the HTTP client to return the issuer metadata
+        req_mock = AsyncMock(return_value={"issuer": ISSUER})
+        self.hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]
+
         channel = self.make_request(
             "GET",
             "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
         )
+
         self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(channel.json_body, {"issuer": ISSUER})
+
+        req_mock.assert_called_with(
+            "https://account.example.com/.well-known/openid-configuration"
+        )
+        req_mock.reset_mock()
+
+        # On the second call, it should use the cached value
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
+        )
+
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertEqual(channel.json_body, {"issuer": ISSUER})
+        req_mock.assert_not_called()
diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py
new file mode 100644
index 0000000000..1793b38c4a
--- /dev/null
+++ b/tests/rest/client/test_delayed_events.py
@@ -0,0 +1,376 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+"""Tests REST events for /delayed_events paths."""
+
+from http import HTTPStatus
+from typing import List
+
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.errors import Codes
+from synapse.rest.client import delayed_events, room, versions
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import HomeserverTestCase
+
+PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events"
+
+_HS_NAME = "red"
+_EVENT_TYPE = "com.example.test"
+
+
+class DelayedEventsUnstableSupportTestCase(HomeserverTestCase):
+    servlets = [versions.register_servlets]
+
+    def test_false_by_default(self) -> None:
+        channel = self.make_request("GET", "/_matrix/client/versions")
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc4140"])
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_true_if_enabled(self) -> None:
+        channel = self.make_request("GET", "/_matrix/client/versions")
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc4140"])
+
+
+class DelayedEventsTestCase(HomeserverTestCase):
+    """Tests getting and managing delayed events."""
+
+    servlets = [delayed_events.register_servlets, room.register_servlets]
+    user_id = f"@sid1:{_HS_NAME}"
+
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        config["server_name"] = _HS_NAME
+        config["max_event_delay_duration"] = "24h"
+        return config
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.room_id = self.helper.create_room_as(
+            self.user_id,
+            extra_content={
+                "preset": "trusted_private_chat",
+            },
+        )
+
+    def test_delayed_events_empty_on_startup(self) -> None:
+        self.assertListEqual([], self._get_delayed_events())
+
+    def test_delayed_state_events_are_sent_on_timeout(self) -> None:
+        state_key = "to_send_on_timeout"
+
+        setter_key = "setter"
+        setter_expected = "on_timeout"
+        channel = self.make_request(
+            "PUT",
+            _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900),
+            {
+                setter_key: setter_expected,
+            },
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+        content = self._get_delayed_event_content(events[0])
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+        self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
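+        # Advance the fake clock by 1s, past the 900ms delay, so the delayed event
+        # fires.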
+        self.reactor.advance(1)
+        self.assertListEqual([], self._get_delayed_events())
+        content = self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+        )
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+
+    def test_update_delayed_event_without_id(self) -> None:
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/",
+        )
+        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
+
+    def test_update_delayed_event_without_body(self) -> None:
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/abc",
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            Codes.NOT_JSON,
+            channel.json_body["errcode"],
+        )
+
+    def test_update_delayed_event_without_action(self) -> None:
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/abc",
+            {},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            Codes.MISSING_PARAM,
+            channel.json_body["errcode"],
+        )
+
+    def test_update_delayed_event_with_invalid_action(self) -> None:
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/abc",
+            {"action": "oops"},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            Codes.INVALID_PARAM,
+            channel.json_body["errcode"],
+        )
+
+    @parameterized.expand(["cancel", "restart", "send"])
+    def test_update_delayed_event_without_match(self, action: str) -> None:
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/abc",
+            {"action": action},
+        )
+        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
+
+    def test_cancel_delayed_state_event(self) -> None:
+        state_key = "to_never_send"
+
+        setter_key = "setter"
+        setter_expected = "none"
+        channel = self.make_request(
+            "PUT",
+            _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500),
+            {
+                setter_key: setter_expected,
+            },
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        delay_id = channel.json_body.get("delay_id")
+        self.assertIsNotNone(delay_id)
+
+        self.reactor.advance(1)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+        content = self._get_delayed_event_content(events[0])
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+        self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/{delay_id}",
+            {"action": "cancel"},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        self.assertListEqual([], self._get_delayed_events())
+
+        self.reactor.advance(1)
+        content = self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
+    def test_send_delayed_state_event(self) -> None:
+        state_key = "to_send_on_request"
+
+        setter_key = "setter"
+        setter_expected = "on_send"
+        channel = self.make_request(
+            "PUT",
+            _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 100000),
+            {
+                setter_key: setter_expected,
+            },
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        delay_id = channel.json_body.get("delay_id")
+        self.assertIsNotNone(delay_id)
+
+        self.reactor.advance(1)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+        content = self._get_delayed_event_content(events[0])
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+        self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/{delay_id}",
+            {"action": "send"},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        self.assertListEqual([], self._get_delayed_events())
+        content = self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+        )
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+
+    def test_restart_delayed_state_event(self) -> None:
+        state_key = "to_send_on_restarted_timeout"
+
+        setter_key = "setter"
+        setter_expected = "on_timeout"
+        channel = self.make_request(
+            "PUT",
+            _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500),
+            {
+                setter_key: setter_expected,
+            },
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        delay_id = channel.json_body.get("delay_id")
+        self.assertIsNotNone(delay_id)
+
+        self.reactor.advance(1)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+        content = self._get_delayed_event_content(events[0])
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+        self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
+        channel = self.make_request(
+            "POST",
+            f"{PATH_PREFIX}/{delay_id}",
+            {"action": "restart"},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+
+        self.reactor.advance(1)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+        content = self._get_delayed_event_content(events[0])
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+        self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+            expect_code=HTTPStatus.NOT_FOUND,
+        )
+
+        self.reactor.advance(1)
+        self.assertListEqual([], self._get_delayed_events())
+        content = self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+        )
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+
+    def test_delayed_state_events_are_cancelled_by_more_recent_state(self) -> None:
+        state_key = "to_be_cancelled"
+
+        setter_key = "setter"
+        channel = self.make_request(
+            "PUT",
+            _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900),
+            {
+                setter_key: "on_timeout",
+            },
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+        events = self._get_delayed_events()
+        self.assertEqual(1, len(events), events)
+
+        setter_expected = "manual"
+        self.helper.send_state(
+            self.room_id,
+            _EVENT_TYPE,
+            {
+                setter_key: setter_expected,
+            },
+            None,
+            state_key=state_key,
+        )
+        self.assertListEqual([], self._get_delayed_events())
+
+        self.reactor.advance(1)
+        content = self.helper.get_state(
+            self.room_id,
+            _EVENT_TYPE,
+            "",
+            state_key=state_key,
+        )
+        self.assertEqual(setter_expected, content.get(setter_key), content)
+
+    def _get_delayed_events(self) -> List[JsonDict]:
+        channel = self.make_request(
+            "GET",
+            PATH_PREFIX,
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+
+        key = "delayed_events"
+        self.assertIn(key, channel.json_body)
+
+        events = channel.json_body[key]
+        self.assertIsInstance(events, list)
+
+        return events
+
+    def _get_delayed_event_content(self, event: JsonDict) -> JsonDict:
+        key = "content"
+        self.assertIn(key, event)
+
+        content = event[key]
+        self.assertIsInstance(content, dict)
+
+        return content
+
+
+def _get_path_for_delayed_state(
+    room_id: str, event_type: str, state_key: str, delay_ms: int
+) -> str:
+    return f"rooms/{room_id}/state/{event_type}/{state_key}?org.matrix.msc4140.delay={delay_ms}"
diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py
index 06f1c1b234..039144fdbe 100644
--- a/tests/rest/client/test_events.py
+++ b/tests/rest/client/test_events.py
@@ -19,7 +19,7 @@
 #
 #
 
-""" Tests REST events for /events paths."""
+"""Tests REST events for /events paths."""
 
 from unittest.mock import Mock
 
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 8bbd109092..d9a210b616 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -315,9 +315,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                     "master_key": master_key2,
                 },
             )
-            self.assertEqual(
-                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-            )
+            self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)
 
         # Pretend that MAS did UIA and allowed us to replace the master key.
         channel = self.make_request(
@@ -349,9 +347,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                     "master_key": master_key3,
                 },
             )
-            self.assertEqual(
-                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-            )
+            self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)
 
         # Pretend that MAS did UIA and allowed us to replace the master key.
         channel = self.make_request(
@@ -376,6 +372,4 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
                     "master_key": master_key3,
                 },
             )
-            self.assertEqual(
-                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
-            )
+            self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body)
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 3fb77fd9dd..cbd6d8d4bf 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -969,9 +969,8 @@ class CASTestCase(unittest.HomeserverTestCase):
         # Test that the response is HTML.
         self.assertEqual(channel.code, 200, channel.result)
         content_type_header_value = ""
-        for header in channel.result.get("headers", []):
-            if header[0] == b"Content-Type":
-                content_type_header_value = header[1].decode("utf8")
+        for header in channel.headers.getRawHeaders("Content-Type", []):
+            content_type_header_value = header
 
         self.assertTrue(content_type_header_value.startswith("text/html"))
 
@@ -1048,6 +1047,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         login.register_servlets,
+        profile.register_servlets,
     ]
 
     jwt_secret = "secret"
@@ -1203,6 +1203,30 @@ class JWTTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200, msg=channel.result)
         self.assertEqual(channel.json_body["user_id"], "@frog:test")
 
+    @override_config(
+        {"jwt_config": {**base_config, "display_name_claim": "display_name"}}
+    )
+    def test_login_custom_display_name(self) -> None:
+        """Test setting a custom display name."""
+        localpart = "pinkie"
+        user_id = f"@{localpart}:test"
+        display_name = "Pinkie Pie"
+
+        # Perform the login, specifying a custom display name.
+        channel = self.jwt_login({"sub": localpart, "display_name": display_name})
+        self.assertEqual(channel.code, 200, msg=channel.result)
+        self.assertEqual(channel.json_body["user_id"], user_id)
+
+        # Fetch the user's display name and check that it was set correctly.
+        access_token = channel.json_body["access_token"]
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v3/profile/{user_id}/displayname",
+            access_token=access_token,
+        )
+        self.assertEqual(channel.code, 200, msg=channel.result)
+        self.assertEqual(channel.json_body["displayname"], display_name)
+
     def test_login_no_token(self) -> None:
         params = {"type": "org.matrix.login.jwt"}
         channel = self.make_request(b"POST", LOGIN_URL, params)
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 6b5af2dbb6..42014e257e 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -23,12 +23,15 @@ import io
 import json
 import os
 import re
-from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Sequence, Tuple, Type
+import shutil
+from typing import Any, BinaryIO, Dict, List, Optional, Sequence, Tuple, Type
 from unittest.mock import MagicMock, Mock, patch
 from urllib import parse
 from urllib.parse import quote, urlencode
 
-from parameterized import parameterized_class
+from parameterized import parameterized, parameterized_class
+from PIL import Image as Image
+from typing_extensions import ClassVar
 
 from twisted.internet import defer
 from twisted.internet._resolver import HostResolution
@@ -48,7 +51,8 @@ from synapse.config.oembed import OEmbedEndpointConfig
 from synapse.http.client import MultipartResponse
 from synapse.http.types import QueryParams
 from synapse.logging.context import make_deferred_yieldable
-from synapse.media._base import FileInfo
+from synapse.media._base import FileInfo, ThumbnailInfo
+from synapse.media.thumbnailer import ThumbnailProvider
 from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS
 from synapse.rest import admin
 from synapse.rest.client import login, media
@@ -76,7 +80,7 @@ except ImportError:
     lxml = None  # type: ignore[assignment]
 
 
-class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
+class MediaDomainBlockingTests(unittest.HomeserverTestCase):
     remote_media_id = "doesnotmatter"
     remote_server_name = "evil.com"
     servlets = [
@@ -144,7 +148,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
             # Should result in a 404.
             "prevent_media_downloads_from": ["evil.com"],
             "dynamic_thumbnails": True,
-            "experimental_features": {"msc3916_authenticated_media_enabled": True},
         }
     )
     def test_cannot_download_blocked_media_thumbnail(self) -> None:
@@ -153,7 +156,7 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
         """
         response = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+            f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
             shorthand=False,
             content={"width": 100, "height": 100},
             access_token=self.tok,
@@ -166,7 +169,6 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
             # This proves we haven't broken anything.
             "prevent_media_downloads_from": ["not-listed.com"],
             "dynamic_thumbnails": True,
-            "experimental_features": {"msc3916_authenticated_media_enabled": True},
         }
     )
     def test_remote_media_thumbnail_normally_unblocked(self) -> None:
@@ -175,14 +177,14 @@ class UnstableMediaDomainBlockingTests(unittest.HomeserverTestCase):
         """
         response = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+            f"/_matrix/client/v1/media/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
             shorthand=False,
             access_token=self.tok,
         )
         self.assertEqual(response.code, 200)
 
 
-class UnstableURLPreviewTests(unittest.HomeserverTestCase):
+class URLPreviewTests(unittest.HomeserverTestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
@@ -198,7 +200,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         config = self.default_config()
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
         config["url_preview_enabled"] = True
         config["max_spider_size"] = 9999999
         config["url_preview_ip_range_blacklist"] = (
@@ -284,18 +285,6 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         self.reactor.nameResolver = Resolver()  # type: ignore[assignment]
 
-    def create_resource_dict(self) -> Dict[str, Resource]:
-        """Create a resource tree for the test server
-
-        A resource tree is a mapping from path to twisted.web.resource.
-
-        The default implementation creates a JsonResource and calls each function in
-        `servlets` to register servlets against it.
-        """
-        resources = super().create_resource_dict()
-        resources["/_matrix/media"] = self.hs.get_media_repository_resource()
-        return resources
-
     def _assert_small_png(self, json_body: JsonDict) -> None:
         """Assert properties from the SMALL_PNG test image."""
         self.assertTrue(json_body["og:image"].startswith("mxc://"))
@@ -309,7 +298,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -334,7 +323,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check the cache returns the correct response
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
         )
 
@@ -352,7 +341,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check the database cache returns the correct response
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
         )
 
@@ -375,7 +364,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -405,7 +394,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -441,7 +430,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -482,7 +471,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -517,7 +506,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -550,7 +539,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -580,7 +569,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -603,7 +592,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -622,7 +611,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://192.168.1.1",
+            "/_matrix/client/v1/media/preview_url?url=http://192.168.1.1",
             shorthand=False,
         )
 
@@ -640,7 +629,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://1.1.1.2",
+            "/_matrix/client/v1/media/preview_url?url=http://1.1.1.2",
             shorthand=False,
         )
 
@@ -659,7 +648,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -696,7 +685,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
         self.assertEqual(channel.code, 502)
@@ -718,7 +707,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -741,7 +730,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
 
@@ -760,7 +749,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         """
         channel = self.make_request(
             "OPTIONS",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
         )
         self.assertEqual(channel.code, 204)
@@ -774,7 +763,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Build and make a request to the server
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://example.com",
+            "/_matrix/client/v1/media/preview_url?url=http://example.com",
             shorthand=False,
             await_result=False,
         )
@@ -827,7 +816,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -877,7 +866,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -919,7 +908,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -959,7 +948,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -1000,7 +989,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?{query_params}",
+            f"/_matrix/client/v1/media/preview_url?{query_params}",
             shorthand=False,
         )
         self.pump()
@@ -1021,7 +1010,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://matrix.org",
+            "/_matrix/client/v1/media/preview_url?url=http://matrix.org",
             shorthand=False,
             await_result=False,
         )
@@ -1058,7 +1047,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1118,7 +1107,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1167,7 +1156,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.hulu.com/watch/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.hulu.com/watch/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1212,7 +1201,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1241,7 +1230,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1333,7 +1322,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+            "/_matrix/client/v1/media/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
             shorthand=False,
             await_result=False,
         )
@@ -1374,7 +1363,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url=http://cdn.twitter.com/matrixdotorg",
+            "/_matrix/client/v1/media/preview_url?url=http://cdn.twitter.com/matrixdotorg",
             shorthand=False,
             await_result=False,
         )
@@ -1416,7 +1405,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check fetching
         channel = self.make_request(
             "GET",
-            f"/_matrix/media/v3/download/{host}/{media_id}",
+            f"/_matrix/client/v1/media/download/{host}/{media_id}",
             shorthand=False,
             await_result=False,
         )
@@ -1429,7 +1418,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/media/v3/download/{host}/{media_id}",
+            f"/_matrix/client/v1/download/{host}/{media_id}",
             shorthand=False,
             await_result=False,
         )
@@ -1464,7 +1453,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         # Check fetching
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+            f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
             shorthand=False,
             await_result=False,
         )
@@ -1482,7 +1471,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
+            f"/_matrix/client/v1/media/thumbnail/{host}/{media_id}?width=32&height=32&method=scale",
             shorthand=False,
             await_result=False,
         )
@@ -1532,8 +1521,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + bad_url,
+            "/_matrix/client/v1/media/preview_url?url=" + bad_url,
             shorthand=False,
             await_result=False,
         )
@@ -1542,8 +1530,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + good_url,
+            "/_matrix/client/v1/media/preview_url?url=" + good_url,
             shorthand=False,
             await_result=False,
         )
@@ -1575,8 +1562,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/preview_url?url="
-            + bad_url,
+            "/_matrix/client/v1/media/preview_url?url=" + bad_url,
             shorthand=False,
             await_result=False,
         )
@@ -1584,7 +1570,7 @@ class UnstableURLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 403, channel.result)
 
 
-class UnstableMediaConfigTest(unittest.HomeserverTestCase):
+class MediaConfigTest(unittest.HomeserverTestCase):
     servlets = [
         media.register_servlets,
         admin.register_servlets,
@@ -1595,7 +1581,6 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
         self, reactor: ThreadedMemoryReactorClock, clock: Clock
     ) -> HomeServer:
         config = self.default_config()
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
 
         self.storage_path = self.mktemp()
         self.media_store_path = self.mktemp()
@@ -1622,7 +1607,7 @@ class UnstableMediaConfigTest(unittest.HomeserverTestCase):
     def test_media_config(self) -> None:
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc3916/media/config",
+            "/_matrix/client/v1/media/config",
             shorthand=False,
             access_token=self.tok,
         )
@@ -1825,13 +1810,19 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
         )
         assert channel.code == 200
 
+    @override_config(
+        {
+            "remote_media_download_burst_count": "87M",
+        }
+    )
     @patch(
         "synapse.http.matrixfederationclient.read_multipart_response",
         read_multipart_response_30MiB,
     )
-    def test_download_ratelimit_max_size_sub(self) -> None:
+    def test_download_ratelimit_unknown_length(self) -> None:
         """
-        Test that if no content-length is provided, the default max size is applied instead
+        Test that if no content-length is provided, ratelimiting is still applied after
+        the media is downloaded and its length is known
         """
 
         # mock out actually sending the request
@@ -1847,8 +1838,9 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
 
         self.client._send_request = _send_request  # type: ignore
 
-        # ten requests should go through using the max size (500MB/50MB)
-        for i in range(10):
+        # the first 3 requests will go through (the 3rd technically violates the rate limit,
+        # but since ratelimiting is applied *after* the download it still succeeds; the next request fails)
+        for i in range(3):
             channel2 = self.make_request(
                 "GET",
                 f"/_matrix/client/v1/media/download/remote.org/abc{i}",
@@ -1857,7 +1849,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
             )
             assert channel2.code == 200
 
-        # eleventh will hit ratelimit
+        # 4th will hit ratelimit
         channel3 = self.make_request(
             "GET",
             "/_matrix/client/v1/media/download/remote.org/abcd",
@@ -1866,6 +1858,39 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
         )
         assert channel3.code == 429
 
+    @override_config({"max_upload_size": "29M"})
+    @patch(
+        "synapse.http.matrixfederationclient.read_multipart_response",
+        read_multipart_response_30MiB,
+    )
+    def test_max_download_respected(self) -> None:
+        """
+        Test that the max download size is enforced - note that the max download size is
+        determined by the max_upload_size config option
+        """
+
+        # mock out actually sending the request, returns a 30MiB response
+        async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
+            resp = MagicMock(spec=IResponse)
+            resp.code = 200
+            resp.length = 31457280
+            resp.headers = Headers(
+                {"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
+            )
+            resp.phrase = b"OK"
+            return resp
+
+        self.client._send_request = _send_request  # type: ignore
+
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/remote.org/abcd",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        assert channel.code == 502
+        assert channel.json_body["errcode"] == "M_TOO_LARGE"
+
     def test_file_download(self) -> None:
         content = io.BytesIO(b"file_to_stream")
         content_uri = self.get_success(
@@ -1899,7 +1924,7 @@ input_values = [(x,) for x in test_images]
 
 
 @parameterized_class(("test_image",), input_values)
-class DownloadTestCase(unittest.HomeserverTestCase):
+class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase):
     test_image: ClassVar[TestImage]
     servlets = [
         media.register_servlets,
@@ -1932,7 +1957,7 @@ class DownloadTestCase(unittest.HomeserverTestCase):
             """A mock for MatrixFederationHttpClient.federation_get_file."""
 
             def write_to(
-                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]]
+                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]],
             ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
                 data, response = r
                 output_stream.write(data)
@@ -1966,7 +1991,7 @@ class DownloadTestCase(unittest.HomeserverTestCase):
             """A mock for MatrixFederationHttpClient.get_file."""
 
             def write_to(
-                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]
+                r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]],
             ) -> Tuple[int, Dict[bytes, List[bytes]]]:
                 data, response = r
                 output_stream.write(data)
@@ -2005,7 +2030,6 @@ class DownloadTestCase(unittest.HomeserverTestCase):
             "config": {"directory": self.storage_path},
         }
         config["media_storage_providers"] = [provider_config]
-        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}
 
         hs = self.setup_test_homeserver(config=config, federation_http_client=client)
 
@@ -2164,7 +2188,7 @@ class DownloadTestCase(unittest.HomeserverTestCase):
 
     def test_unknown_federation_endpoint(self) -> None:
         """
-        Test that if the downloadd request to remote federation endpoint returns a 404
+        Test that if the download request to remote federation endpoint returns a 404
         we fall back to the _matrix/media endpoint
         """
         channel = self.make_request(
@@ -2210,3 +2234,444 @@ class DownloadTestCase(unittest.HomeserverTestCase):
 
         self.pump()
         self.assertEqual(channel.code, 200)
+
+    def test_thumbnail_crop(self) -> None:
+        """Test that a cropped remote thumbnail is available."""
+        self._test_thumbnail(
+            "crop",
+            self.test_image.expected_cropped,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_thumbnail_scale(self) -> None:
+        """Test that a scaled remote thumbnail is available."""
+        self._test_thumbnail(
+            "scale",
+            self.test_image.expected_scaled,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_invalid_type(self) -> None:
+        """An invalid thumbnail type is never available."""
+        self._test_thumbnail(
+            "invalid",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    @unittest.override_config(
+        {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]}
+    )
+    def test_no_thumbnail_crop(self) -> None:
+        """
+        Override the config to generate only scaled thumbnails, but request a cropped one.
+        """
+        self._test_thumbnail(
+            "crop",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    @unittest.override_config(
+        {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]}
+    )
+    def test_no_thumbnail_scale(self) -> None:
+        """
+        Override the config to generate only cropped thumbnails, but request a scaled one.
+        """
+        self._test_thumbnail(
+            "scale",
+            None,
+            expected_found=False,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+    def test_thumbnail_repeated_thumbnail(self) -> None:
+        """Test that fetching the same thumbnail works, and deleting the on disk
+        """Test that fetching the same thumbnail works, and that deleting the on-disk
+        thumbnail regenerates it.
+        self._test_thumbnail(
+            "scale",
+            self.test_image.expected_scaled,
+            expected_found=self.test_image.expected_found,
+            unable_to_thumbnail=self.test_image.unable_to_thumbnail,
+        )
+
+        if not self.test_image.expected_found:
+            return
+
+        # Fetching again should work, without re-requesting the image from the
+        # remote.
+        params = "?width=32&height=32&method=scale"
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 200)
+        if self.test_image.expected_scaled:
+            self.assertEqual(
+                channel.result["body"],
+                self.test_image.expected_scaled,
+                channel.result["body"],
+            )
+
+        # Deleting the thumbnail on disk then re-requesting it should work as
+        # Synapse should regenerate missing thumbnails.
+        info = self.get_success(
+            self.store.get_cached_remote_media(self.remote, self.media_id)
+        )
+        assert info is not None
+        file_id = info.filesystem_id
+
+        thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
+            self.remote, file_id
+        )
+        shutil.rmtree(thumbnail_dir, ignore_errors=True)
+
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 200)
+        if self.test_image.expected_scaled:
+            self.assertEqual(
+                channel.result["body"],
+                self.test_image.expected_scaled,
+                channel.result["body"],
+            )
+
+    def _test_thumbnail(
+        self,
+        method: str,
+        expected_body: Optional[bytes],
+        expected_found: bool,
+        unable_to_thumbnail: bool = False,
+    ) -> None:
+        """Test the given thumbnailing method works as expected.
+
+        Args:
+            method: The thumbnailing method to use (crop, scale).
+            expected_body: The expected bytes from thumbnailing, or None if
+                test should just check for a valid image.
+            expected_found: True if the file should exist on the server, or False if
+                a 404/400 is expected.
+            unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or
+                False if the thumbnailing should succeed or a normal 404 is expected.
+        """
+
+        params = "?width=32&height=32&method=" + method
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{self.remote}/{self.media_id}{params}",
+            shorthand=False,
+            await_result=False,
+            access_token=self.tok,
+        )
+        self.pump()
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.test_image.data))],
+            b"Content-Type": [self.test_image.content_type],
+        }
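+        # Resolve the mocked remote fetch by delivering the test image data and headers.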
+        self.fetches[0][0].callback(
+            (self.test_image.data, (len(self.test_image.data), headers))
+        )
+        self.pump()
+        if expected_found:
+            self.assertEqual(channel.code, 200)
+
+            self.assertEqual(
+                channel.headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
+                [b"cross-origin"],
+            )
+
+            if expected_body is not None:
+                self.assertEqual(
+                    channel.result["body"], expected_body, channel.result["body"]
+                )
+            else:
+                # ensure that the result is at least some valid image
+                Image.open(io.BytesIO(channel.result["body"]))
+        elif unable_to_thumbnail:
+            # A 400 with a JSON body.
+            self.assertEqual(channel.code, 400)
+            self.assertEqual(
+                channel.json_body,
+                {
+                    "errcode": "M_UNKNOWN",
+                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/client/v1/media/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+                },
+            )
+        else:
+            # A 404 with a JSON body.
+            self.assertEqual(channel.code, 404)
+            self.assertEqual(
+                channel.json_body,
+                {
+                    "errcode": "M_NOT_FOUND",
+                    "error": "Not found '/_matrix/client/v1/media/thumbnail/example.com/12345'",
+                },
+            )
+
+    @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)])
+    def test_same_quality(self, method: str, desired_size: int) -> None:
+        """Test that choosing between thumbnails with the same quality rating succeeds.
+
+        We are not particular about which thumbnail is chosen."""
+
+        content_type = self.test_image.content_type.decode()
+        media_repo = self.hs.get_media_repository()
+        thumbnail_provider = ThumbnailProvider(
+            self.hs, media_repo, media_repo.media_storage
+        )
+
+        self.assertIsNotNone(
+            thumbnail_provider._select_thumbnail(
+                desired_width=desired_size,
+                desired_height=desired_size,
+                desired_method=method,
+                desired_type=content_type,
+                # Provide two identical thumbnails which are guaranteed to have the same
+                # quality rating.
+                thumbnail_infos=[
+                    ThumbnailInfo(
+                        width=32,
+                        height=32,
+                        method=method,
+                        type=content_type,
+                        length=256,
+                    ),
+                    ThumbnailInfo(
+                        width=32,
+                        height=32,
+                        method=method,
+                        type=content_type,
+                        length=256,
+                    ),
+                ],
+                file_id=f"image{self.test_image.extension.decode()}",
+                url_cache=False,
+                server_name=None,
+            )
+        )
+
+
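+# Run the authenticated media tests both with and without dynamic thumbnails.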
+configs = [
+    {"extra_config": {"dynamic_thumbnails": True}},
+    {"extra_config": {"dynamic_thumbnails": False}},
+]
+
+
+@parameterized_class(configs)
+class AuthenticatedMediaTestCase(unittest.HomeserverTestCase):
+    extra_config: Dict[str, Any]
+    servlets = [
+        media.register_servlets,
+        login.register_servlets,
+        admin.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        config = self.default_config()
+
+        self.clock = clock
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+        config["media_store_path"] = self.media_store_path
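+        # enable_authenticated_media is the setting under test: freshly stored media should
+        # only be served over the authenticated endpoints.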
+        config["enable_authenticated_media"] = True
+
+        provider_config = {
+            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
+        config["media_storage_providers"] = [provider_config]
+        config.update(self.extra_config)
+
+        return self.setup_test_homeserver(config=config)
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.repo = hs.get_media_repository()
+        self.client = hs.get_federation_http_client()
+        self.store = hs.get_datastores().main
+        self.user = self.register_user("user", "pass")
+        self.tok = self.login("user", "pass")
+
+    def create_resource_dict(self) -> Dict[str, Resource]:
+        resources = super().create_resource_dict()
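+        # Expose the legacy /_matrix/media resource so the unauthenticated endpoints can be exercised.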
+        resources["/_matrix/media"] = self.hs.get_media_repository_resource()
+        return resources
+
+    def test_authenticated_media(self) -> None:
+        # upload some local media with authentication on
+        channel = self.make_request(
+            "POST",
+            "_matrix/media/v3/upload?filename=test_png_upload",
+            SMALL_PNG,
+            self.tok,
+            shorthand=False,
+            content_type=b"image/png",
+            custom_headers=[("Content-Length", str(67))],
+        )
+        self.assertEqual(channel.code, 200)
+        res = channel.json_body.get("content_uri")
+        assert res is not None
+        uri = res.split("mxc://")[1]
+
+        # request media over authenticated endpoint, should be found
+        channel2 = self.make_request(
+            "GET",
+            f"_matrix/client/v1/media/download/{uri}",
+            access_token=self.tok,
+            shorthand=False,
+        )
+        self.assertEqual(channel2.code, 200)
+
+        # request the same media over the unauthenticated endpoint, which should return 404 not found
+        channel3 = self.make_request(
+            "GET", f"_matrix/media/v3/download/{uri}", shorthand=False
+        )
+        self.assertEqual(channel3.code, 404)
+
+        # check thumbnails as well
+        params = "?width=32&height=32&method=crop"
+        channel4 = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/{uri}{params}",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel4.code, 200)
+
+        params = "?width=32&height=32&method=crop"
+        channel5 = self.make_request(
+            "GET",
+            f"/_matrix/media/r0/thumbnail/{uri}{params}",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel5.code, 404)
+
+        # Inject a piece of remote media.
+        file_id = "abcdefg12345"
+        file_info = FileInfo(server_name="lonelyIsland", file_id=file_id)
+
+        media_storage = self.hs.get_media_repository().media_storage
+
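+        # store_into_file returns an async context manager; enter and exit it manually to write the file.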
+        ctx = media_storage.store_into_file(file_info)
+        (f, fname) = self.get_success(ctx.__aenter__())
+        f.write(SMALL_PNG)
+        self.get_success(ctx.__aexit__(None, None, None))
+
+        # the authenticated status is written when storing media, so this should pick up the
+        # config and mark the media as authenticated
+        self.get_success(
+            self.store.store_cached_remote_media(
+                origin="lonelyIsland",
+                media_id="52",
+                media_type="image/png",
+                media_length=1,
+                time_now_ms=self.clock.time_msec(),
+                upload_name="remote_test.png",
+                filesystem_id=file_id,
+            )
+        )
+
+        # ensure we have thumbnails for the non-dynamic code path
+        if self.extra_config == {"dynamic_thumbnails": False}:
+            self.get_success(
+                self.repo._generate_thumbnails(
+                    "lonelyIsland", "52", file_id, "image/png"
+                )
+            )
+
+        channel6 = self.make_request(
+            "GET",
+            "_matrix/client/v1/media/download/lonelyIsland/52",
+            access_token=self.tok,
+            shorthand=False,
+        )
+        self.assertEqual(channel6.code, 200)
+
+        channel7 = self.make_request(
+            "GET", f"_matrix/media/v3/download/{uri}", shorthand=False
+        )
+        self.assertEqual(channel7.code, 404)
+
+        params = "?width=32&height=32&method=crop"
+        channel8 = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/media/thumbnail/lonelyIsland/52{params}",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel8.code, 200)
+
+        channel9 = self.make_request(
+            "GET",
+            f"/_matrix/media/r0/thumbnail/lonelyIsland/52{params}",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel9.code, 404)
+
+        # Inject a piece of local media that isn't authenticated
+        file_id = "abcdefg123456"
+        file_info = FileInfo(None, file_id=file_id)
+
+        ctx = media_storage.store_into_file(file_info)
+        (f, fname) = self.get_success(ctx.__aenter__())
+        f.write(SMALL_PNG)
+        self.get_success(ctx.__aexit__(None, None, None))
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "local_media_repository",
+                {
+                    "media_id": "abcdefg123456",
+                    "media_type": "image/png",
+                    "created_ts": self.clock.time_msec(),
+                    "upload_name": "test_local",
+                    "media_length": 1,
+                    "user_id": "someone",
+                    "url_cache": None,
+                    "authenticated": False,
+                },
+                desc="store_local_media",
+            )
+        )
+
+        # check that unauthenticated media is still available over both endpoints
+        channel9 = self.make_request(
+            "GET",
+            "/_matrix/client/v1/media/download/test/abcdefg123456",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel9.code, 200)
+
+        channel10 = self.make_request(
+            "GET",
+            "/_matrix/media/r0/download/test/abcdefg123456",
+            shorthand=False,
+            access_token=self.tok,
+        )
+        self.assertEqual(channel10.code, 200)
diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py
index f8a56c80ca..f14585ccac 100644
--- a/tests/rest/client/test_models.py
+++ b/tests/rest/client/test_models.py
@@ -19,18 +19,12 @@
 #
 #
 import unittest as stdlib_unittest
-from typing import TYPE_CHECKING
 
 from typing_extensions import Literal
 
-from synapse._pydantic_compat import HAS_PYDANTIC_V2
+from synapse._pydantic_compat import BaseModel, ValidationError
 from synapse.types.rest.client import EmailRequestTokenBody
 
-if TYPE_CHECKING or HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, ValidationError
-else:
-    from pydantic import BaseModel, ValidationError
-
 
 class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase):
     class Model(BaseModel):
diff --git a/tests/rest/client/test_owned_state.py b/tests/rest/client/test_owned_state.py
new file mode 100644
index 0000000000..5fb5767676
--- /dev/null
+++ b/tests/rest/client/test_owned_state.py
@@ -0,0 +1,308 @@
+from http import HTTPStatus
+
+from parameterized import parameterized_class
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.errors import Codes
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+_STATE_EVENT_TEST_TYPE = "com.example.test"
+
+# To stress-test parsing, include separator & sigil characters
+_STATE_KEY_SUFFIX = "_state_key_suffix:!@#$123"
+
+
+class OwnedStateBase(HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.creator_user_id = self.register_user("creator", "pass")
+        self.creator_access_token = self.login("creator", "pass")
+        self.user1_user_id = self.register_user("user1", "pass")
+        self.user1_access_token = self.login("user1", "pass")
+
+        self.room_id = self.helper.create_room_as(
+            self.creator_user_id,
+            tok=self.creator_access_token,
+            is_public=True,
+            extra_content={
+                "power_level_content_override": {
+                    "events": {
+                        _STATE_EVENT_TEST_TYPE: 0,
+                    },
+                },
+            },
+        )
+
+        self.helper.join(
+            room=self.room_id, user=self.user1_user_id, tok=self.user1_access_token
+        )
+
+
+class WithoutOwnedStateTestCase(OwnedStateBase):
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        config["default_room_version"] = RoomVersions.V10.identifier
+        return config
+
+    def test_user_can_set_state_with_own_userid_key(self) -> None:
+        self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.OK,
+        )
+
+    def test_room_creator_cannot_set_state_with_own_suffixed_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.creator_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_room_creator_cannot_set_state_with_other_userid_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_room_creator_cannot_set_state_with_other_suffixed_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_room_creator_cannot_set_state_with_nonmember_userid_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key="@notinroom:hs2",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key="@oops",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+
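+# Parameterize over every known room version that supports MSC3757 owned state.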
+@parameterized_class(
+    ("room_version",),
+    [(i,) for i, v in KNOWN_ROOM_VERSIONS.items() if v.msc3757_enabled],
+)
+class MSC3757OwnedStateTestCase(OwnedStateBase):
+    room_version: str
+
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        config["default_room_version"] = self.room_version
+        return config
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        super().prepare(reactor, clock, hs)
+
+        self.user2_user_id = self.register_user("user2", "pass")
+        self.user2_access_token = self.login("user2", "pass")
+
+        self.helper.join(
+            room=self.room_id, user=self.user2_user_id, tok=self.user2_access_token
+        )
+
+    def test_user_can_set_state_with_own_suffixed_key(self) -> None:
+        self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.OK,
+        )
+
+    def test_room_creator_can_set_state_with_other_userid_key(self) -> None:
+        self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.OK,
+        )
+
+    def test_room_creator_can_set_state_with_other_suffixed_key(self) -> None:
+        self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.OK,
+        )
+
+    def test_user_cannot_set_state_with_other_userid_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user2_user_id}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_user_cannot_set_state_with_other_suffixed_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user2_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_user_cannot_set_state_with_unseparated_suffixed_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX[1:]}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_user_cannot_set_state_with_misplaced_userid_in_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            # Still put @ at the start of the state key, because without it there is no write protection at all
+            state_key=f"@prefix_{self.user1_user_id}{_STATE_KEY_SUFFIX}",
+            tok=self.user1_access_token,
+            expect_code=HTTPStatus.FORBIDDEN,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.FORBIDDEN,
+            body,
+        )
+
+    def test_room_creator_can_set_state_with_nonmember_userid_key(self) -> None:
+        self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key="@notinroom:hs2",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.OK,
+        )
+
+    def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key="@oops",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.BAD_REQUEST,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.BAD_JSON,
+            body,
+        )
+
+    def test_room_creator_cannot_set_state_with_improperly_suffixed_key(self) -> None:
+        body = self.helper.send_state(
+            self.room_id,
+            _STATE_EVENT_TEST_TYPE,
+            {},
+            state_key=f"{self.creator_user_id}@{_STATE_KEY_SUFFIX[1:]}",
+            tok=self.creator_access_token,
+            expect_code=HTTPStatus.BAD_REQUEST,
+        )
+
+        self.assertEqual(
+            body["errcode"],
+            Codes.BAD_JSON,
+            body,
+        )
diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py
index f98f3f77aa..a92713d220 100644
--- a/tests/rest/client/test_profile.py
+++ b/tests/rest/client/test_profile.py
@@ -20,6 +20,7 @@
 #
 
 """Tests REST events for /profile paths."""
+
 import urllib.parse
 from http import HTTPStatus
 from typing import Any, Dict, Optional
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 694f143eff..c091f403cc 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -1049,9 +1049,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
 
         # Check that the HTML we're getting is the one we expect when using an
         # invalid/unknown token.
-        expected_html = (
-            self.hs.config.account_validity.account_validity_invalid_token_template.render()
-        )
+        expected_html = self.hs.config.account_validity.account_validity_invalid_token_template.render()
         self.assertEqual(
             channel.result["body"], expected_html.encode("utf8"), channel.result
         )
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index c559dfda83..2ecd37ca1a 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -4,7 +4,7 @@
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2023-2024 New Vector, Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -2291,6 +2291,106 @@ class RoomMessageFilterTestCase(RoomBase):
         self.assertEqual(len(chunk), 2, [event["content"] for event in chunk])
 
 
+class RoomDelayedEventTestCase(RoomBase):
+    """Tests delayed events."""
+
+    user_id = "@sid1:red"
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.room_id = self.helper.create_room_as(self.user_id)
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_send_delayed_invalid_event(self) -> None:
+        """Test sending a delayed event with invalid content."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000"
+                % self.room_id
+            ).encode("ascii"),
+            {},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertNotIn("org.matrix.msc4140.errcode", channel.json_body)
+
+    def test_delayed_event_unsupported_by_default(self) -> None:
+        """Test that sending a delayed event is unsupported with the default config."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000"
+                % self.room_id
+            ).encode("ascii"),
+            {"body": "test", "msgtype": "m.text"},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            "M_MAX_DELAY_UNSUPPORTED",
+            channel.json_body.get("org.matrix.msc4140.errcode"),
+            channel.json_body,
+        )
+
+    @unittest.override_config({"max_event_delay_duration": "1000"})
+    def test_delayed_event_exceeds_max_delay(self) -> None:
+        """Test that sending a delayed event fails if its delay is longer than allowed."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000"
+                % self.room_id
+            ).encode("ascii"),
+            {"body": "test", "msgtype": "m.text"},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            "M_MAX_DELAY_EXCEEDED",
+            channel.json_body.get("org.matrix.msc4140.errcode"),
+            channel.json_body,
+        )
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_delayed_event_with_negative_delay(self) -> None:
+        """Test that sending a delayed event fails if its delay is negative."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=-2000"
+                % self.room_id
+            ).encode("ascii"),
+            {"body": "test", "msgtype": "m.text"},
+        )
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
+        self.assertEqual(
+            Codes.INVALID_PARAM, channel.json_body["errcode"], channel.json_body
+        )
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_send_delayed_message_event(self) -> None:
+        """Test sending a valid delayed message event."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000"
+                % self.room_id
+            ).encode("ascii"),
+            {"body": "test", "msgtype": "m.text"},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_send_delayed_state_event(self) -> None:
+        """Test sending a valid delayed state event."""
+        channel = self.make_request(
+            "PUT",
+            (
+                "rooms/%s/state/m.room.topic/?org.matrix.msc4140.delay=2000"
+                % self.room_id
+            ).encode("ascii"),
+            {"topic": "This is a topic"},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+
+
 class RoomSearchTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index cb2888409e..c52a5b2e79 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -20,7 +20,7 @@
 #
 import json
 import logging
-from typing import AbstractSet, Any, Dict, Iterable, List, Optional
+from typing import List
 
 from parameterized import parameterized, parameterized_class
 
@@ -28,19 +28,14 @@ from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
 from synapse.api.constants import (
-    AccountDataTypes,
     EventContentFields,
     EventTypes,
-    HistoryVisibility,
-    Membership,
     ReceiptTypes,
     RelationTypes,
 )
-from synapse.events import EventBase
-from synapse.handlers.sliding_sync import StateValues
 from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync
 from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID
+from synapse.types import JsonDict
 from synapse.util import Clock
 
 from tests import unittest
@@ -48,7 +43,6 @@ from tests.federation.transport.test_knocking import (
     KnockingStrippedStateEventHelperMixin,
 )
 from tests.server import TimedOutException
-from tests.test_utils.event_injection import mark_event_as_partial_state
 
 logger = logging.getLogger(__name__)
 
@@ -288,22 +282,33 @@ class SyncTypingTests(unittest.HomeserverTestCase):
         self.assertEqual(200, channel.code)
         next_batch = channel.json_body["next_batch"]
 
-        # This should time out! But it does not, because our stream token is
-        # ahead, and therefore it's saying the typing (that we've actually
-        # already seen) is new, since it's got a token above our new, now-reset
-        # stream token.
-        channel = self.make_request("GET", sync_url % (access_token, next_batch))
-        self.assertEqual(200, channel.code)
-        next_batch = channel.json_body["next_batch"]
-
         # Clear the typing information, so that it doesn't think everything is
-        # in the future.
+        # in the future. This happens automatically when the typing stream
+        # resets.
         typing._reset()
 
-        # Now it SHOULD fail as it never completes!
+        # Nothing new, so we time out.
         with self.assertRaises(TimedOutException):
             self.make_request("GET", sync_url % (access_token, next_batch))
 
+        # Sync and start typing again.
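+        # `await_result=False` starts the long-poll without blocking, so we can
+        # check below that the request is still pending until new typing arrives.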
+        sync_channel = self.make_request(
+            "GET", sync_url % (access_token, next_batch), await_result=False
+        )
+        self.assertFalse(sync_channel.is_finished())
+
+        channel = self.make_request(
+            "PUT",
+            typing_url % (room, other_user_id, other_access_token),
+            b'{"typing": true, "timeout": 30000}',
+        )
+        self.assertEqual(200, channel.code)
+
+        # Sync should now return.
+        sync_channel.await_result()
+        self.assertEqual(200, sync_channel.code)
+        next_batch = sync_channel.json_body["next_batch"]
+
 
 class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin):
     servlets = [
@@ -1103,12 +1108,11 @@ class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase):
         self.assertEqual(res, [])
 
         # Upload a fallback key for the user/device
-        fallback_key = {"alg1:k1": "fallback_key1"}
         self.get_success(
             self.e2e_keys_handler.upload_keys_for_user(
                 alice_user_id,
                 test_device_id,
-                {"fallback_keys": fallback_key},
+                {"fallback_keys": {"alg1:k1": "fallback_key1"}},
             )
         )
         # We should now have an unused alg1 key
@@ -1213,2182 +1217,3 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase):
 
         self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
         self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
-
-
-class SlidingSyncTestCase(unittest.HomeserverTestCase):
-    """
-    Tests regarding MSC3575 Sliding Sync `/sync` endpoint.
-    """
-
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-        room.register_servlets,
-        sync.register_servlets,
-        devices.register_servlets,
-    ]
-
-    def default_config(self) -> JsonDict:
-        config = super().default_config()
-        # Enable sliding sync
-        config["experimental_features"] = {"msc3575_enabled": True}
-        return config
-
-    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastores().main
-        self.sync_endpoint = (
-            "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
-        )
-        self.store = hs.get_datastores().main
-        self.event_sources = hs.get_event_sources()
-        self.storage_controllers = hs.get_storage_controllers()
-
-    def _assertRequiredStateIncludes(
-        self,
-        actual_required_state: Any,
-        expected_state_events: Iterable[EventBase],
-        exact: bool = False,
-    ) -> None:
-        """
-        Wrapper around `_assertIncludes` to give slightly better looking diff error
-        messages that include some context "$event_id (type, state_key)".
-
-        Args:
-            actual_required_state: The "required_state" of a room from a Sliding Sync
-                request response.
-            expected_state_events: The expected state events to be included in the
-                `actual_required_state`.
-            exact: Whether the actual state should be exactly equal to the expected
-                state (no extras).
-        """
-
-        assert isinstance(actual_required_state, list)
-        for event in actual_required_state:
-            assert isinstance(event, dict)
-
-        self._assertIncludes(
-            {
-                f'{event["event_id"]} ("{event["type"]}", "{event["state_key"]}")'
-                for event in actual_required_state
-            },
-            {
-                f'{event.event_id} ("{event.type}", "{event.state_key}")'
-                for event in expected_state_events
-            },
-            exact=exact,
-            # Message to help understand the diff in context
-            message=str(actual_required_state),
-        )
-
-    def _assertIncludes(
-        self,
-        actual_items: AbstractSet[str],
-        expected_items: AbstractSet[str],
-        exact: bool = False,
-        message: Optional[str] = None,
-    ) -> None:
-        """
-        Assert that all of the `expected_items` are included in the `actual_items`.
-
-        This assert could also be called `assertContains`, `assertItemsInSet`
-
-        Args:
-            actual_items: The container
-            expected_items: The items to check for in the container
-            exact: Whether the actual state should be exactly equal to the expected
-                state (no extras).
-            message: Optional message to include in the failure message.
-        """
-        # Check that each set has the same items
-        if exact and actual_items == expected_items:
-            return
-        # Check for a superset
-        elif not exact and actual_items >= expected_items:
-            return
-
-        expected_lines: List[str] = []
-        for expected_item in expected_items:
-            is_expected_in_actual = expected_item in actual_items
-            expected_lines.append(
-                "{}  {}".format(" " if is_expected_in_actual else "?", expected_item)
-            )
-
-        actual_lines: List[str] = []
-        for actual_item in actual_items:
-            is_actual_in_expected = actual_item in expected_items
-            actual_lines.append(
-                "{}  {}".format("+" if is_actual_in_expected else " ", actual_item)
-            )
-
-        newline = "\n"
-        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
-        actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
-        first_message = (
-            "Items must match exactly" if exact else "Some expected items are missing."
-        )
-        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
-
-        self.fail(f"{diff_message}\n{message}")
-
-    def _add_new_dm_to_global_account_data(
-        self, source_user_id: str, target_user_id: str, target_room_id: str
-    ) -> None:
-        """
-        Helper to handle inserting a new DM for the source user into global account data
-        (handles all of the list merging).
-
-        Args:
-            source_user_id: The user ID of the DM mapping we're going to update
-            target_user_id: User ID of the person the DM is with
-            target_room_id: Room ID of the DM
-        """
-
-        # Get the current DM map
-        existing_dm_map = self.get_success(
-            self.store.get_global_account_data_by_type_for_user(
-                source_user_id, AccountDataTypes.DIRECT
-            )
-        )
-        # Scrutinize the account data since it has no concrete type. We're just copying
-        # everything into a known type. It should be a mapping from user ID to a list of
-        # room IDs. Ignore anything else.
-        new_dm_map: Dict[str, List[str]] = {}
-        if isinstance(existing_dm_map, dict):
-            for user_id, room_ids in existing_dm_map.items():
-                if isinstance(user_id, str) and isinstance(room_ids, list):
-                    for room_id in room_ids:
-                        if isinstance(room_id, str):
-                            new_dm_map[user_id] = new_dm_map.get(user_id, []) + [
-                                room_id
-                            ]
-
-        # Add the new DM to the map
-        new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [
-            target_room_id
-        ]
-        # Save the DM map to global account data
-        self.get_success(
-            self.store.add_account_data_for_user(
-                source_user_id,
-                AccountDataTypes.DIRECT,
-                new_dm_map,
-            )
-        )
-
-    def _create_dm_room(
-        self,
-        inviter_user_id: str,
-        inviter_tok: str,
-        invitee_user_id: str,
-        invitee_tok: str,
-        should_join_room: bool = True,
-    ) -> str:
-        """
-        Helper to create a DM room as the "inviter" and invite the "invitee" user to the
-        room. The "invitee" user also will join the room. The `m.direct` account data
-        will be set for both users.
-        """
-
-        # Create a room and send an invite the other user
-        room_id = self.helper.create_room_as(
-            inviter_user_id,
-            is_public=False,
-            tok=inviter_tok,
-        )
-        self.helper.invite(
-            room_id,
-            src=inviter_user_id,
-            targ=invitee_user_id,
-            tok=inviter_tok,
-            extra_data={"is_direct": True},
-        )
-        if should_join_room:
-            # Person that was invited joins the room
-            self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
-
-        # Mimic the client setting the room as a direct message in the global account
-        # data for both users.
-        self._add_new_dm_to_global_account_data(
-            invitee_user_id, inviter_user_id, room_id
-        )
-        self._add_new_dm_to_global_account_data(
-            inviter_user_id, invitee_user_id, room_id
-        )
-
-        return room_id
-
-    def test_sync_list(self) -> None:
-        """
-        Test that room IDs show up in the Sliding Sync `lists`
-        """
-        alice_user_id = self.register_user("alice", "correcthorse")
-        alice_access_token = self.login(alice_user_id, "correcthorse")
-
-        room_id = self.helper.create_room_as(
-            alice_user_id, tok=alice_access_token, is_public=True
-        )
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 99]],
-                        "required_state": [
-                            ["m.room.join_rules", ""],
-                            ["m.room.history_visibility", ""],
-                            ["m.space.child", "*"],
-                        ],
-                        "timeline_limit": 1,
-                    }
-                }
-            },
-            access_token=alice_access_token,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure it has the foo-list we requested
-        self.assertListEqual(
-            list(channel.json_body["lists"].keys()),
-            ["foo-list"],
-            channel.json_body["lists"].keys(),
-        )
-
-        # Make sure the list includes the room we are joined to
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [room_id],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
-
-    def test_wait_for_sync_token(self) -> None:
-        """
-        Test that worker will wait until it catches up to the given token
-        """
-        alice_user_id = self.register_user("alice", "correcthorse")
-        alice_access_token = self.login(alice_user_id, "correcthorse")
-
-        # Create a future token that will cause us to wait. Since we never send a new
-        # event to reach that future stream_ordering, the worker will wait until the
-        # full timeout.
-        stream_id_gen = self.store.get_events_stream_id_generator()
-        stream_id = self.get_success(stream_id_gen.get_next().__aenter__())
-        current_token = self.event_sources.get_current_token()
-        future_position_token = current_token.copy_and_replace(
-            StreamKeyType.ROOM,
-            RoomStreamToken(stream=stream_id),
-        )
-
-        future_position_token_serialized = self.get_success(
-            future_position_token.to_string(self.store)
-        )
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint + f"?pos={future_position_token_serialized}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 99]],
-                        "required_state": [
-                            ["m.room.join_rules", ""],
-                            ["m.room.history_visibility", ""],
-                            ["m.space.child", "*"],
-                        ],
-                        "timeline_limit": 1,
-                    }
-                }
-            },
-            access_token=alice_access_token,
-            await_result=False,
-        )
-        # Block for 10 seconds to make `notifier.wait_for_stream_token(from_token)`
-        # timeout
-        with self.assertRaises(TimedOutException):
-            channel.await_result(timeout_ms=9900)
-        channel.await_result(timeout_ms=200)
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We expect the next `pos` in the result to be the same as what we requested
-        # with because we weren't able to find anything new yet.
-        self.assertEqual(channel.json_body["pos"], future_position_token_serialized)
-
-    def test_filter_list(self) -> None:
-        """
-        Test that filters apply to `lists`
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        # Create a DM room
-        joined_dm_room_id = self._create_dm_room(
-            inviter_user_id=user1_id,
-            inviter_tok=user1_tok,
-            invitee_user_id=user2_id,
-            invitee_tok=user2_tok,
-            should_join_room=True,
-        )
-        invited_dm_room_id = self._create_dm_room(
-            inviter_user_id=user1_id,
-            inviter_tok=user1_tok,
-            invitee_user_id=user2_id,
-            invitee_tok=user2_tok,
-            should_join_room=False,
-        )
-
-        # Create a normal room
-        room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
-        self.helper.join(room_id, user1_id, tok=user1_tok)
-
-        # Create a room that user1 is invited to
-        invite_room_id = self.helper.create_room_as(user1_id, tok=user2_tok)
-        self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    # Absense of filters does not imply "False" values
-                    "all": {
-                        "ranges": [[0, 99]],
-                        "required_state": [],
-                        "timeline_limit": 1,
-                        "filters": {},
-                    },
-                    # Test single truthy filter
-                    "dms": {
-                        "ranges": [[0, 99]],
-                        "required_state": [],
-                        "timeline_limit": 1,
-                        "filters": {"is_dm": True},
-                    },
-                    # Test single falsy filter
-                    "non-dms": {
-                        "ranges": [[0, 99]],
-                        "required_state": [],
-                        "timeline_limit": 1,
-                        "filters": {"is_dm": False},
-                    },
-                    # Test how multiple filters should stack (AND'd together)
-                    "room-invites": {
-                        "ranges": [[0, 99]],
-                        "required_state": [],
-                        "timeline_limit": 1,
-                        "filters": {"is_dm": False, "is_invite": True},
-                    },
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure it has the foo-list we requested
-        self.assertListEqual(
-            list(channel.json_body["lists"].keys()),
-            ["all", "dms", "non-dms", "room-invites"],
-            channel.json_body["lists"].keys(),
-        )
-
-        # Make sure the lists have the correct rooms
-        self.assertListEqual(
-            list(channel.json_body["lists"]["all"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [
-                        invite_room_id,
-                        room_id,
-                        invited_dm_room_id,
-                        joined_dm_room_id,
-                    ],
-                }
-            ],
-            list(channel.json_body["lists"]["all"]),
-        )
-        self.assertListEqual(
-            list(channel.json_body["lists"]["dms"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [invited_dm_room_id, joined_dm_room_id],
-                }
-            ],
-            list(channel.json_body["lists"]["dms"]),
-        )
-        self.assertListEqual(
-            list(channel.json_body["lists"]["non-dms"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [invite_room_id, room_id],
-                }
-            ],
-            list(channel.json_body["lists"]["non-dms"]),
-        )
-        self.assertListEqual(
-            list(channel.json_body["lists"]["room-invites"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [invite_room_id],
-                }
-            ],
-            list(channel.json_body["lists"]["room-invites"]),
-        )
-
-    def test_sort_list(self) -> None:
-        """
-        Test that the `lists` are sorted by `stream_ordering`
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-
-        # Activity that will order the rooms
-        self.helper.send(room_id3, "activity in room3", tok=user1_tok)
-        self.helper.send(room_id1, "activity in room1", tok=user1_tok)
-        self.helper.send(room_id2, "activity in room2", tok=user1_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 99]],
-                        "required_state": [
-                            ["m.room.join_rules", ""],
-                            ["m.room.history_visibility", ""],
-                            ["m.space.child", "*"],
-                        ],
-                        "timeline_limit": 1,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure it has the foo-list we requested
-        self.assertListEqual(
-            list(channel.json_body["lists"].keys()),
-            ["foo-list"],
-            channel.json_body["lists"].keys(),
-        )
-
-        # Make sure the list is sorted in the way we expect
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 99],
-                    "room_ids": [room_id2, room_id1, room_id3],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
-
-    def test_sliced_windows(self) -> None:
-        """
-        Test that the `lists` `ranges` are sliced correctly. Both sides of each range
-        are inclusive.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-
-        _room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-        room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-        room_id3 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
-
-        # Make the Sliding Sync request for a single room
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 0]],
-                        "required_state": [
-                            ["m.room.join_rules", ""],
-                            ["m.room.history_visibility", ""],
-                            ["m.space.child", "*"],
-                        ],
-                        "timeline_limit": 1,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure it has the foo-list we requested
-        self.assertListEqual(
-            list(channel.json_body["lists"].keys()),
-            ["foo-list"],
-            channel.json_body["lists"].keys(),
-        )
-        # Make sure the list is sorted in the way we expect
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 0],
-                    "room_ids": [room_id3],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
-
-        # Make the Sliding Sync request for the first two rooms
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            ["m.room.join_rules", ""],
-                            ["m.room.history_visibility", ""],
-                            ["m.space.child", "*"],
-                        ],
-                        "timeline_limit": 1,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure it has the foo-list we requested
-        self.assertListEqual(
-            list(channel.json_body["lists"].keys()),
-            ["foo-list"],
-            channel.json_body["lists"].keys(),
-        )
-        # Make sure the list is sorted in the way we expect
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 1],
-                    "room_ids": [room_id3, room_id2],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
-
-    def test_rooms_limited_initial_sync(self) -> None:
-        """
-        Test that we mark `rooms` as `limited=True` when we saturate the `timeline_limit`
-        on initial sync.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity1", tok=user2_tok)
-        self.helper.send(room_id1, "activity2", tok=user2_tok)
-        event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok)
-        event_pos3 = self.get_success(
-            self.store.get_position_for_event(event_response3["event_id"])
-        )
-        event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok)
-        event_pos4 = self.get_success(
-            self.store.get_position_for_event(event_response4["event_id"])
-        )
-        event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok)
-        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We expect to saturate the `timeline_limit` (there are more than 3 messages in the room)
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            True,
-            channel.json_body["rooms"][room_id1],
-        )
-        # Check to make sure the latest events are returned
-        self.assertEqual(
-            [
-                event["event_id"]
-                for event in channel.json_body["rooms"][room_id1]["timeline"]
-            ],
-            [
-                event_response4["event_id"],
-                event_response5["event_id"],
-                user1_join_response["event_id"],
-            ],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-
-        # Check to make sure the `prev_batch` points at the right place
-        prev_batch_token = self.get_success(
-            StreamToken.from_string(
-                self.store, channel.json_body["rooms"][room_id1]["prev_batch"]
-            )
-        )
-        prev_batch_room_stream_token_serialized = self.get_success(
-            prev_batch_token.room_key.to_string(self.store)
-        )
-        # If we use the `prev_batch` token to look backwards, we should see `event3`
-        # next so make sure the token encompasses it
-        self.assertEqual(
-            event_pos3.persisted_after(prev_batch_token.room_key),
-            False,
-            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}",
-        )
-        # If we use the `prev_batch` token to look backwards, we shouldn't see `event4`
-        # anymore since it was just returned in this response.
-        self.assertEqual(
-            event_pos4.persisted_after(prev_batch_token.room_key),
-            True,
-            f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}",
-        )
-
-        # With no `from_token` (initial sync), it's all historical since there is no
-        # "live" range
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            0,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_not_limited_initial_sync(self) -> None:
-        """
-        Test that we mark `rooms` as `limited=False` when there are no more events to
-        paginate to.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity1", tok=user2_tok)
-        self.helper.send(room_id1, "activity2", tok=user2_tok)
-        self.helper.send(room_id1, "activity3", tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Make the Sliding Sync request
-        timeline_limit = 100
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": timeline_limit,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # The timeline should be `limited=False` because we have all of the events (no
-        # more to paginate to)
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            False,
-            channel.json_body["rooms"][room_id1],
-        )
-        expected_number_of_events = 9
-        # We're just looking to make sure we got all of the events before hitting the `timeline_limit`
-        self.assertEqual(
-            len(channel.json_body["rooms"][room_id1]["timeline"]),
-            expected_number_of_events,
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-        self.assertLessEqual(expected_number_of_events, timeline_limit)
-
-        # With no `from_token` (initial sync), it's all historical since there is no
-        # "live" token range.
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            0,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_incremental_sync(self) -> None:
-        """
-        Test `rooms` data during an incremental sync after an initial sync.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.send(room_id1, "activity before initial sync1", tok=user2_tok)
-
-        # Make an initial Sliding Sync request to grab a token. This is also a sanity
-        # check that we can go from initial to incremental sync.
-        sync_params = {
-            "lists": {
-                "foo-list": {
-                    "ranges": [[0, 1]],
-                    "required_state": [],
-                    "timeline_limit": 3,
-                }
-            }
-        }
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            sync_params,
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-        next_pos = channel.json_body["pos"]
-
-        # Send some events but don't send enough to saturate the `timeline_limit`.
-        # We want to later test that we only get the new events since the `next_pos`
-        event_response2 = self.helper.send(room_id1, "activity after2", tok=user2_tok)
-        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
-
-        # Make an incremental Sliding Sync request (what we're trying to test)
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint + f"?pos={next_pos}",
-            sync_params,
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We only expect to see the new events since the last sync which isn't enough to
-        # fill up the `timeline_limit`.
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            False,
-            f'Our `timeline_limit` was {sync_params["lists"]["foo-list"]["timeline_limit"]} '
-            + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
-            + str(channel.json_body["rooms"][room_id1]),
-        )
-        # Check to make sure the latest events are returned
-        self.assertEqual(
-            [
-                event["event_id"]
-                for event in channel.json_body["rooms"][room_id1]["timeline"]
-            ],
-            [
-                event_response2["event_id"],
-                event_response3["event_id"],
-            ],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-
-        # All events are "live"
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            2,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_newly_joined_incremental_sync(self) -> None:
-        """
-        Test that when we make an incremental sync with a `newly_joined` `rooms`, we are
-        able to see some historical events before the `from_token`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity before token1", tok=user2_tok)
-        event_response2 = self.helper.send(
-            room_id1, "activity before token2", tok=user2_tok
-        )
-
-        from_token = self.event_sources.get_current_token()
-
-        # Join the room after the `from_token` which will make us consider this room as
-        # `newly_joined`.
-        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Send some events but don't send enough to saturate the `timeline_limit`.
-        # We want to later test that we only get the new events since the `next_pos`
-        event_response3 = self.helper.send(
-            room_id1, "activity after token3", tok=user2_tok
-        )
-        event_response4 = self.helper.send(
-            room_id1, "activity after token4", tok=user2_tok
-        )
-
-        # The `timeline_limit` is set to 4 so we can at least see one historical event
-        # before the `from_token`. We should see historical events because this is a
-        # `newly_joined` room.
-        timeline_limit = 4
-        # Make an incremental Sliding Sync request (what we're trying to test)
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": timeline_limit,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We should see the new events and the rest should be filled with historical
-        # events which will make us `limited=True` since there are more to paginate to.
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            True,
-            f"Our `timeline_limit` was {timeline_limit} "
-            + f'and {len(channel.json_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
-            + str(channel.json_body["rooms"][room_id1]),
-        )
-        # Check to make sure that the "live" and historical events are returned
-        self.assertEqual(
-            [
-                event["event_id"]
-                for event in channel.json_body["rooms"][room_id1]["timeline"]
-            ],
-            [
-                event_response2["event_id"],
-                user1_join_response["event_id"],
-                event_response3["event_id"],
-                event_response4["event_id"],
-            ],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-
-        # Only events after the `from_token` are "live" (join, event3, event4)
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            3,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_invite_shared_history_initial_sync(self) -> None:
-        """
-        Test that `rooms` we are invited to have some stripped `invite_state` during an
-        initial sync.
-
-        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
-        but we also shouldn't see any timeline events because the history visiblity is
-        `shared` and we haven't joined the room yet.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user1 = UserID.from_string(user1_id)
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user2 = UserID.from_string(user2_id)
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        # Ensure we're testing with a room with `shared` history visibility which means
-        # history visible until you actually join the room.
-        history_visibility_response = self.helper.get_state(
-            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
-        )
-        self.assertEqual(
-            history_visibility_response.get("history_visibility"),
-            HistoryVisibility.SHARED,
-        )
-
-        self.helper.send(room_id1, "activity before1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before2", tok=user2_tok)
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity after3", tok=user2_tok)
-        self.helper.send(room_id1, "activity after4", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # `timeline` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("timeline"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("num_live"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("limited"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("prev_batch"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `required_state` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("required_state"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # We should have some `stripped_state` so the potential joiner can identify the
-        # room (we don't care about the order).
-        self.assertCountEqual(
-            channel.json_body["rooms"][room_id1]["invite_state"],
-            [
-                {
-                    "content": {"creator": user2_id, "room_version": "10"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.create",
-                },
-                {
-                    "content": {"join_rule": "public"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.join_rules",
-                },
-                {
-                    "content": {"displayname": user2.localpart, "membership": "join"},
-                    "sender": user2_id,
-                    "state_key": user2_id,
-                    "type": "m.room.member",
-                },
-                {
-                    "content": {"displayname": user1.localpart, "membership": "invite"},
-                    "sender": user2_id,
-                    "state_key": user1_id,
-                    "type": "m.room.member",
-                },
-            ],
-            channel.json_body["rooms"][room_id1]["invite_state"],
-        )
-
-    def test_rooms_invite_shared_history_incremental_sync(self) -> None:
-        """
-        Test that `rooms` we are invited to have some stripped `invite_state` during an
-        incremental sync.
-
-        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
-        but we also shouldn't see any timeline events because the history visiblity is
-        `shared` and we haven't joined the room yet.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user1 = UserID.from_string(user1_id)
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user2 = UserID.from_string(user2_id)
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        # Ensure we're testing with a room with `shared` history visibility which means
-        # history visible until you actually join the room.
-        history_visibility_response = self.helper.get_state(
-            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
-        )
-        self.assertEqual(
-            history_visibility_response.get("history_visibility"),
-            HistoryVisibility.SHARED,
-        )
-
-        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
-
-        from_token = self.event_sources.get_current_token()
-
-        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
-        self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # `timeline` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("timeline"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("num_live"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("limited"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("prev_batch"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `required_state` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("required_state"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # We should have some `stripped_state` so the potential joiner can identify the
-        # room (we don't care about the order).
-        self.assertCountEqual(
-            channel.json_body["rooms"][room_id1]["invite_state"],
-            [
-                {
-                    "content": {"creator": user2_id, "room_version": "10"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.create",
-                },
-                {
-                    "content": {"join_rule": "public"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.join_rules",
-                },
-                {
-                    "content": {"displayname": user2.localpart, "membership": "join"},
-                    "sender": user2_id,
-                    "state_key": user2_id,
-                    "type": "m.room.member",
-                },
-                {
-                    "content": {"displayname": user1.localpart, "membership": "invite"},
-                    "sender": user2_id,
-                    "state_key": user1_id,
-                    "type": "m.room.member",
-                },
-            ],
-            channel.json_body["rooms"][room_id1]["invite_state"],
-        )
-
-    def test_rooms_invite_world_readable_history_initial_sync(self) -> None:
-        """
-        Test that `rooms` we are invited to have some stripped `invite_state` during an
-        initial sync.
-
-        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
-        but depending on the semantics we decide, we could potentially see some
-        historical events before/after the `from_token` because the history is
-        `world_readable`. Same situation for events after the `from_token` if the
-        history visibility was set to `invited`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user1 = UserID.from_string(user1_id)
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user2 = UserID.from_string(user2_id)
-
-        room_id1 = self.helper.create_room_as(
-            user2_id,
-            tok=user2_tok,
-            extra_content={
-                "preset": "public_chat",
-                "initial_state": [
-                    {
-                        "content": {
-                            "history_visibility": HistoryVisibility.WORLD_READABLE
-                        },
-                        "state_key": "",
-                        "type": EventTypes.RoomHistoryVisibility,
-                    }
-                ],
-            },
-        )
-        # Ensure we're testing with a room with `world_readable` history visibility
-        # which means events are visible to anyone even without membership.
-        history_visibility_response = self.helper.get_state(
-            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
-        )
-        self.assertEqual(
-            history_visibility_response.get("history_visibility"),
-            HistoryVisibility.WORLD_READABLE,
-        )
-
-        self.helper.send(room_id1, "activity before1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before2", tok=user2_tok)
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity after3", tok=user2_tok)
-        self.helper.send(room_id1, "activity after4", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        # Large enough to see the latest events and before the invite
-                        "timeline_limit": 4,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # `timeline` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("timeline"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("num_live"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("limited"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("prev_batch"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `required_state` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("required_state"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # We should have some `stripped_state` so the potential joiner can identify the
-        # room (we don't care about the order).
-        self.assertCountEqual(
-            channel.json_body["rooms"][room_id1]["invite_state"],
-            [
-                {
-                    "content": {"creator": user2_id, "room_version": "10"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.create",
-                },
-                {
-                    "content": {"join_rule": "public"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.join_rules",
-                },
-                {
-                    "content": {"displayname": user2.localpart, "membership": "join"},
-                    "sender": user2_id,
-                    "state_key": user2_id,
-                    "type": "m.room.member",
-                },
-                {
-                    "content": {"displayname": user1.localpart, "membership": "invite"},
-                    "sender": user2_id,
-                    "state_key": user1_id,
-                    "type": "m.room.member",
-                },
-            ],
-            channel.json_body["rooms"][room_id1]["invite_state"],
-        )
-
-    def test_rooms_invite_world_readable_history_incremental_sync(self) -> None:
-        """
-        Test that `rooms` we are invited to have some stripped `invite_state` during an
-        incremental sync.
-
-        This is an `invite` room so we should only have `stripped_state` (no `timeline`)
-        but depending on the semantics we decide, we could potentially see some
-        historical events before/after the `from_token` because the history is
-        `world_readable`. Same situation for events after the `from_token` if the
-        history visibility was set to `invited`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user1 = UserID.from_string(user1_id)
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user2 = UserID.from_string(user2_id)
-
-        room_id1 = self.helper.create_room_as(
-            user2_id,
-            tok=user2_tok,
-            extra_content={
-                "preset": "public_chat",
-                "initial_state": [
-                    {
-                        "content": {
-                            "history_visibility": HistoryVisibility.WORLD_READABLE
-                        },
-                        "state_key": "",
-                        "type": EventTypes.RoomHistoryVisibility,
-                    }
-                ],
-            },
-        )
-        # Ensure we're testing with a room with `world_readable` history visibility
-        # which means events are visible to anyone even without membership.
-        history_visibility_response = self.helper.get_state(
-            room_id1, EventTypes.RoomHistoryVisibility, tok=user2_tok
-        )
-        self.assertEqual(
-            history_visibility_response.get("history_visibility"),
-            HistoryVisibility.WORLD_READABLE,
-        )
-
-        self.helper.send(room_id1, "activity before invite1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before invite2", tok=user2_tok)
-        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite3", tok=user2_tok)
-        self.helper.send(room_id1, "activity after invite4", tok=user2_tok)
-
-        from_token = self.event_sources.get_current_token()
-
-        self.helper.send(room_id1, "activity after token5", tok=user2_tok)
-        self.helper.send(room_id1, "activity after toekn6", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        # Large enough to see the latest events and before the invite
-                        "timeline_limit": 4,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # `timeline` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("timeline"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `num_live` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("num_live"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `limited` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("limited"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `prev_batch` is omitted for `invite` rooms with `stripped_state` (no timeline anyway)
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("prev_batch"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # `required_state` is omitted for `invite` rooms with `stripped_state`
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("required_state"),
-            channel.json_body["rooms"][room_id1],
-        )
-        # We should have some `stripped_state` so the potential joiner can identify the
-        # room (we don't care about the order).
-        self.assertCountEqual(
-            channel.json_body["rooms"][room_id1]["invite_state"],
-            [
-                {
-                    "content": {"creator": user2_id, "room_version": "10"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.create",
-                },
-                {
-                    "content": {"join_rule": "public"},
-                    "sender": user2_id,
-                    "state_key": "",
-                    "type": "m.room.join_rules",
-                },
-                {
-                    "content": {"displayname": user2.localpart, "membership": "join"},
-                    "sender": user2_id,
-                    "state_key": user2_id,
-                    "type": "m.room.member",
-                },
-                {
-                    "content": {"displayname": user1.localpart, "membership": "invite"},
-                    "sender": user2_id,
-                    "state_key": user1_id,
-                    "type": "m.room.member",
-                },
-            ],
-            channel.json_body["rooms"][room_id1]["invite_state"],
-        )
-
-    def test_rooms_ban_initial_sync(self) -> None:
-        """
-        Test that `rooms` we are banned from in an initial sync only allows us to see
-        timeline events up to the ban event.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity before1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before2", tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
-        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
-        user1_ban_response = self.helper.ban(
-            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
-        )
-
-        self.helper.send(room_id1, "activity after5", tok=user2_tok)
-        self.helper.send(room_id1, "activity after6", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We should see events before the ban but not after
-        self.assertEqual(
-            [
-                event["event_id"]
-                for event in channel.json_body["rooms"][room_id1]["timeline"]
-            ],
-            [
-                event_response3["event_id"],
-                event_response4["event_id"],
-                user1_ban_response["event_id"],
-            ],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-        # No "live" events in an initial sync (no `from_token` to define the "live"
-        # range)
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            0,
-            channel.json_body["rooms"][room_id1],
-        )
-        # There are more events to paginate to
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            True,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_ban_incremental_sync1(self) -> None:
-        """
-        Test that `rooms` we are banned from during the next incremental sync only
-        allows us to see timeline events up to the ban event.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity before1", tok=user2_tok)
-        self.helper.send(room_id1, "activity before2", tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        from_token = self.event_sources.get_current_token()
-
-        event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok)
-        event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok)
-        # The ban is within the token range (between the `from_token` and the sliding
-        # sync request)
-        user1_ban_response = self.helper.ban(
-            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
-        )
-
-        self.helper.send(room_id1, "activity after5", tok=user2_tok)
-        self.helper.send(room_id1, "activity after6", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 4,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We should see events before the ban but not after
-        self.assertEqual(
-            [
-                event["event_id"]
-                for event in channel.json_body["rooms"][room_id1]["timeline"]
-            ],
-            [
-                event_response3["event_id"],
-                event_response4["event_id"],
-                user1_ban_response["event_id"],
-            ],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-        # All live events in the incremental sync
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            3,
-            channel.json_body["rooms"][room_id1],
-        )
-        # There aren't anymore events to paginate to in this range
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            False,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_ban_incremental_sync2(self) -> None:
-        """
-        Test that `rooms` we are banned from before the incremental sync don't return
-        any events in the timeline.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.send(room_id1, "activity before1", tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        self.helper.send(room_id1, "activity after2", tok=user2_tok)
-        # The ban is before we get our `from_token`
-        self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-
-        self.helper.send(room_id1, "activity after3", tok=user2_tok)
-
-        from_token = self.event_sources.get_current_token()
-
-        self.helper.send(room_id1, "activity after4", tok=user2_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [],
-                        "timeline_limit": 4,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Nothing to see for this banned user in the room in the token range
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["timeline"],
-            [],
-            channel.json_body["rooms"][room_id1]["timeline"],
-        )
-        # No events returned in the timeline so nothing is "live"
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["num_live"],
-            0,
-            channel.json_body["rooms"][room_id1],
-        )
-        # There aren't anymore events to paginate to in this range
-        self.assertEqual(
-            channel.json_body["rooms"][room_id1]["limited"],
-            False,
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_no_required_state(self) -> None:
-        """
-        Empty `rooms.required_state` should not return any state events in the room
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        # Empty `required_state`
-                        "required_state": [],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # No `required_state` in response
-        self.assertIsNone(
-            channel.json_body["rooms"][room_id1].get("required_state"),
-            channel.json_body["rooms"][room_id1],
-        )
-
-    def test_rooms_required_state_initial_sync(self) -> None:
-        """
-        Test `rooms.required_state` returns requested state events in the room during an
-        initial sync.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            [EventTypes.RoomHistoryVisibility, ""],
-                            # This one doesn't exist in the room
-                            [EventTypes.Tombstone, ""],
-                        ],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.RoomHistoryVisibility, "")],
-            },
-            exact=True,
-        )
-
-    def test_rooms_required_state_incremental_sync(self) -> None:
-        """
-        Test `rooms.required_state` returns requested state events in the room during an
-        incremental sync.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        after_room_token = self.event_sources.get_current_token()
-
-        # Make the Sliding Sync request
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(after_room_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            [EventTypes.RoomHistoryVisibility, ""],
-                            # This one doesn't exist in the room
-                            [EventTypes.Tombstone, ""],
-                        ],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # The returned state doesn't change from initial to incremental sync. In the
-        # future, we will only return updates but only if we've sent the room down the
-        # connection before.
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.RoomHistoryVisibility, "")],
-            },
-            exact=True,
-        )
-
-    def test_rooms_required_state_wildcard(self) -> None:
-        """
-        Test `rooms.required_state` returns all state events when using wildcard `["*", "*"]`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="namespaced",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-
-        # Make the Sliding Sync request with wildcards for the `event_type` and `state_key`
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [StateValues.WILDCARD, StateValues.WILDCARD],
-                        ],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            # We should see all the state events in the room
-            state_map.values(),
-            exact=True,
-        )
-
-    def test_rooms_required_state_wildcard_event_type(self) -> None:
-        """
-        Test `rooms.required_state` returns relevant state events when using wildcard in
-        the event_type `["*", "foobarbaz"]`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key=user2_id,
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-
-        # Make the Sliding Sync request with wildcards for the `event_type`
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [StateValues.WILDCARD, user2_id],
-                        ],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # We expect at least the state events with `user2_id` as the `state_key`
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[("org.matrix.foo_state", user2_id)],
-            },
-            # Ideally, this would be exact but we're currently returning all state
-            # events when the `event_type` is a wildcard.
-            exact=False,
-        )
-
-    def test_rooms_required_state_wildcard_state_key(self) -> None:
-        """
-        Test `rooms.required_state` returns relevant state events when using wildcard in
-        the state_key `["foobarbaz","*"]`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        # Make the Sliding Sync request with wildcards for the `state_key`
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Member, StateValues.WILDCARD],
-                        ],
-                        "timeline_limit": 0,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[(EventTypes.Member, user2_id)],
-            },
-            exact=True,
-        )
-
-    def test_rooms_required_state_lazy_loading_room_members(self) -> None:
-        """
-        Test `rooms.required_state` returns people relevant to the timeline when
-        lazy-loading room members, `["m.room.member","$LAZY"]`.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user3_id = self.register_user("user3", "pass")
-        user3_tok = self.login(user3_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user3_id, tok=user3_tok)
-
-        self.helper.send(room_id1, "1", tok=user2_tok)
-        self.helper.send(room_id1, "2", tok=user3_tok)
-        self.helper.send(room_id1, "3", tok=user2_tok)
-
-        # Make the Sliding Sync request with lazy loading for the room members
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            [EventTypes.Member, StateValues.LAZY],
-                        ],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # Only user2 and user3 sent events in the 3 events we see in the `timeline`
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[(EventTypes.Member, user3_id)],
-            },
-            exact=True,
-        )
-
-    @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)])
-    def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None:
-        """
-        Test `rooms.required_state` should not return state past a leave/ban event.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-        user3_id = self.register_user("user3", "pass")
-        user3_tok = self.login(user3_id, "pass")
-
-        from_token = self.event_sources.get_current_token()
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-        self.helper.join(room_id1, user3_id, tok=user3_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-
-        if stop_membership == Membership.LEAVE:
-            # User 1 leaves
-            self.helper.leave(room_id1, user1_id, tok=user1_tok)
-        elif stop_membership == Membership.BAN:
-            # User 1 is banned
-            self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        # Change the state after user 1 leaves
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "qux"},
-            tok=user2_tok,
-        )
-        self.helper.leave(room_id1, user3_id, tok=user3_tok)
-
-        # Make the Sliding Sync request with a wildcard `state_key` for the room members
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint
-            + f"?pos={self.get_success(from_token.to_string(self.store))}",
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            [EventTypes.Member, "*"],
-                            ["org.matrix.foo_state", ""],
-                        ],
-                        "timeline_limit": 3,
-                    }
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # We should see the state as of user1's leave/ban, not any of the later changes
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[(EventTypes.Member, user3_id)],
-                state_map[("org.matrix.foo_state", "")],
-            },
-            exact=True,
-        )
-
-    def test_rooms_required_state_combine_superset(self) -> None:
-        """
-        Test `rooms.required_state` is combined across lists and room subscriptions.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        self.helper.join(room_id1, user1_id, tok=user1_tok)
-
-        self.helper.send_state(
-            room_id1,
-            event_type="org.matrix.foo_state",
-            state_key="",
-            body={"foo": "bar"},
-            tok=user2_tok,
-        )
-
-        # Make the Sliding Sync request with wildcards for the `state_key`
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            [EventTypes.Member, user1_id],
-                        ],
-                        "timeline_limit": 0,
-                    },
-                    "bar-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Member, StateValues.WILDCARD],
-                            ["org.matrix.foo_state", ""],
-                        ],
-                        "timeline_limit": 0,
-                    },
-                }
-                # TODO: Room subscription should also combine with the `required_state`
-                # "room_subscriptions": {
-                #     room_id1: {
-                #         "required_state": [
-                #             ["org.matrix.bar_state", ""]
-                #         ],
-                #         "timeline_limit": 0,
-                #     }
-                # }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        state_map = self.get_success(
-            self.storage_controllers.state.get_current_state(room_id1)
-        )
-
-        self._assertRequiredStateIncludes(
-            channel.json_body["rooms"][room_id1]["required_state"],
-            {
-                state_map[(EventTypes.Create, "")],
-                state_map[(EventTypes.Member, user1_id)],
-                state_map[(EventTypes.Member, user2_id)],
-                state_map[("org.matrix.foo_state", "")],
-            },
-            exact=True,
-        )
-
-    def test_rooms_required_state_partial_state(self) -> None:
-        """
-        Test that partially-stated rooms are excluded unless `rooms.required_state` is
-        lazy-loading room members.
-        """
-        user1_id = self.register_user("user1", "pass")
-        user1_tok = self.login(user1_id, "pass")
-        user2_id = self.register_user("user2", "pass")
-        user2_tok = self.login(user2_id, "pass")
-
-        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
-        _join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
-        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
-
-        # Mark room2 as partial state
-        self.get_success(
-            mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2)
-        )
-
-        # Make the Sliding Sync request (NOT lazy-loading room members)
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                        ],
-                        "timeline_limit": 0,
-                    },
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Make sure the list includes room1 but room2 is excluded because it's still
-        # partially-stated
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 1],
-                    "room_ids": [room_id1],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
-
-        # Make the Sliding Sync request (with lazy-loading room members)
-        channel = self.make_request(
-            "POST",
-            self.sync_endpoint,
-            {
-                "lists": {
-                    "foo-list": {
-                        "ranges": [[0, 1]],
-                        "required_state": [
-                            [EventTypes.Create, ""],
-                            # Lazy-load room members
-                            [EventTypes.Member, StateValues.LAZY],
-                        ],
-                        "timeline_limit": 0,
-                    },
-                }
-            },
-            access_token=user1_tok,
-        )
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # The list should include both rooms now because we're lazy-loading room members
-        self.assertListEqual(
-            list(channel.json_body["lists"]["foo-list"]["ops"]),
-            [
-                {
-                    "op": "SYNC",
-                    "range": [0, 1],
-                    "room_ids": [room_id2, room_id1],
-                }
-            ],
-            channel.json_body["lists"]["foo-list"],
-        )
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index e43140720d..a1c284726a 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -45,7 +45,7 @@ from typing_extensions import Literal
 from twisted.test.proto_helpers import MemoryReactorClock
 from twisted.web.server import Site
 
-from synapse.api.constants import Membership
+from synapse.api.constants import Membership, ReceiptTypes
 from synapse.api.errors import Codes
 from synapse.server import HomeServer
 from synapse.types import JsonDict
@@ -330,22 +330,24 @@ class RestHelper:
             data,
         )
 
-        assert (
-            channel.code == expect_code
-        ), "Expected: %d, got: %d, PUT %s -> resp: %r" % (
-            expect_code,
-            channel.code,
-            path,
-            channel.result["body"],
+        assert channel.code == expect_code, (
+            "Expected: %d, got: %d, PUT %s -> resp: %r"
+            % (
+                expect_code,
+                channel.code,
+                path,
+                channel.result["body"],
+            )
         )
 
         if expect_errcode:
-            assert (
-                str(channel.json_body["errcode"]) == expect_errcode
-            ), "Expected: %r, got: %r, resp: %r" % (
-                expect_errcode,
-                channel.json_body["errcode"],
-                channel.result["body"],
+            assert str(channel.json_body["errcode"]) == expect_errcode, (
+                "Expected: %r, got: %r, resp: %r"
+                % (
+                    expect_errcode,
+                    channel.json_body["errcode"],
+                    channel.result["body"],
+                )
             )
 
         if expect_additional_fields is not None:
@@ -354,13 +356,14 @@ class RestHelper:
                     expect_key,
                     channel.json_body,
                 )
-                assert (
-                    channel.json_body[expect_key] == expect_value
-                ), "Expected: %s at %s, got: %s, resp: %s" % (
-                    expect_value,
-                    expect_key,
-                    channel.json_body[expect_key],
-                    channel.json_body,
+                assert channel.json_body[expect_key] == expect_value, (
+                    "Expected: %s at %s, got: %s, resp: %s"
+                    % (
+                        expect_value,
+                        expect_key,
+                        channel.json_body[expect_key],
+                        channel.json_body,
+                    )
                 )
 
         self.auth_user_id = temp_id
@@ -944,3 +947,15 @@ class RestHelper:
         assert len(p.links) == 1, "not exactly one link in confirmation page"
         oauth_uri = p.links[0]
         return oauth_uri
+
+    def send_read_receipt(self, room_id: str, event_id: str, *, tok: str) -> None:
+        """Send a read receipt into the room at the given event"""
+        channel = make_request(
+            self.reactor,
+            self.site,
+            method="POST",
+            path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}",
+            content={},
+            access_token=tok,
+        )
+        assert channel.code == HTTPStatus.OK, channel.text_body
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index e166c13bc1..c73717f014 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -17,6 +17,8 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
+from unittest.mock import AsyncMock
+
 from twisted.web.resource import Resource
 
 from synapse.rest.well_known import well_known_resource
@@ -112,7 +114,6 @@ class WellKnownTests(unittest.HomeserverTestCase):
                 "msc3861": {
                     "enabled": True,
                     "issuer": "https://issuer",
-                    "account_management_url": "https://my-account.issuer",
                     "client_id": "id",
                     "client_auth_method": "client_secret_post",
                     "client_secret": "secret",
@@ -122,18 +123,33 @@ class WellKnownTests(unittest.HomeserverTestCase):
         }
     )
     def test_client_well_known_msc3861_oauth_delegation(self) -> None:
-        channel = self.make_request(
-            "GET", "/.well-known/matrix/client", shorthand=False
+        # Patch the HTTP client to return the issuer metadata
+        req_mock = AsyncMock(
+            return_value={
+                "issuer": "https://issuer",
+                "account_management_uri": "https://my-account.issuer",
+            }
         )
+        self.hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]
 
-        self.assertEqual(channel.code, 200)
-        self.assertEqual(
-            channel.json_body,
-            {
-                "m.homeserver": {"base_url": "https://homeserver/"},
-                "org.matrix.msc2965.authentication": {
-                    "issuer": "https://issuer",
-                    "account": "https://my-account.issuer",
+        for _ in range(2):
+            channel = self.make_request(
+                "GET", "/.well-known/matrix/client", shorthand=False
+            )
+
+            self.assertEqual(channel.code, 200)
+            self.assertEqual(
+                channel.json_body,
+                {
+                    "m.homeserver": {"base_url": "https://homeserver/"},
+                    "org.matrix.msc2965.authentication": {
+                        "issuer": "https://issuer",
+                        "account": "https://my-account.issuer",
+                    },
                 },
-            },
+            )
+
+        # It should have been called exactly once, because it gets cached
+        req_mock.assert_called_once_with(
+            "https://issuer/.well-known/openid-configuration"
         )
diff --git a/tests/server.py b/tests/server.py
index f1cd0f76be..95aff6f66c 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -198,17 +198,35 @@ class FakeChannel:
     def headers(self) -> Headers:
         if not self.result:
             raise Exception("No result yet.")
-        h = Headers()
-        for i in self.result["headers"]:
-            h.addRawHeader(*i)
+
+        h = self.result["headers"]
+        assert isinstance(h, Headers)
         return h
 
     def writeHeaders(
-        self, version: bytes, code: bytes, reason: bytes, headers: Headers
+        self,
+        version: bytes,
+        code: bytes,
+        reason: bytes,
+        headers: Union[Headers, List[Tuple[bytes, bytes]]],
     ) -> None:
         self.result["version"] = version
         self.result["code"] = code
         self.result["reason"] = reason
+
+        if isinstance(headers, list):
+            # Before Twisted 24.7.0rc1, headers were passed as a list of (name, value) tuples
+            new_headers = Headers()
+            for k, v in headers:
+                assert isinstance(k, bytes), f"key is not of type bytes: {k!r}"
+                assert isinstance(v, bytes), f"value is not of type bytes: {v!r}"
+                new_headers.addRawHeader(k, v)
+            headers = new_headers
+
+        assert isinstance(
+            headers, Headers
+        ), f"headers are of the wrong type: {headers!r}"
+
         self.result["headers"] = headers
 
     def write(self, data: bytes) -> None:
@@ -289,10 +307,6 @@ class FakeChannel:
         self._reactor.run()
 
         while not self.is_finished():
-            # If there's a producer, tell it to resume producing so we get content
-            if self._producer:
-                self._producer.resumeProducing()
-
             if self._reactor.seconds() > end_time:
                 raise TimedOutException("Timed out waiting for request to finish.")
 
@@ -1152,6 +1166,12 @@ def setup_test_homeserver(
 
     hs.get_auth_handler().validate_hash = validate_hash  # type: ignore[assignment]
 
+    # We need to replace the media threadpool with the fake test threadpool.
+    def thread_pool() -> threadpool.ThreadPool:
+        return reactor.getThreadPool()
+
+    hs.get_media_sender_thread_pool = thread_pool  # type: ignore[method-assign]
+
     # Load any configured modules into the homeserver
     module_api = hs.get_module_api()
     for module, module_config in hs.config.modules.loaded_modules:
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 0e3e4f7293..997ee7b91b 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -89,7 +89,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
             return_value="!something:localhost"
         )
         self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None)  # type: ignore[method-assign]
-        self._rlsn._store.get_tags_for_room = AsyncMock(return_value={})  # type: ignore[method-assign]
+        self._rlsn._store.get_tags_for_room = AsyncMock(return_value={})
 
     @override_config({"hs_disabled": True})
     def test_maybe_send_server_notice_disabled_hs(self) -> None:
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index fd1f5e7fd5..104d141a72 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -20,7 +20,7 @@
 #
 import json
 from contextlib import contextmanager
-from typing import Generator, List, Tuple
+from typing import Generator, List, Set, Tuple
 from unittest import mock
 
 from twisted.enterprise.adbapi import ConnectionPool
@@ -295,6 +295,53 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
             self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
 
 
+class GetEventsTestCase(unittest.HomeserverTestCase):
+    """Test `get_events(...)`/`get_events_as_list(...)`"""
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store: EventsWorkerStore = hs.get_datastores().main
+
+    def test_get_lots_of_messages(self) -> None:
+        """Sanity check that `get_events(...)`/`get_events_as_list(...)` works"""
+        num_events = 100
+
+        user_id = self.register_user("user", "pass")
+        user_tok = self.login(user_id, "pass")
+
+        room_id = self.helper.create_room_as(user_id, tok=user_tok)
+
+        event_ids: Set[str] = set()
+        for i in range(num_events):
+            event = self.get_success(
+                inject_event(
+                    self.hs,
+                    room_id=room_id,
+                    type="m.room.message",
+                    sender=user_id,
+                    content={
+                        "body": f"foo{i}",
+                        "msgtype": "m.text",
+                    },
+                )
+            )
+            event_ids.add(event.event_id)
+
+        # Sanity check that we actually created the events
+        self.assertEqual(len(event_ids), num_events)
+
+        # This is the function under test
+        fetched_event_map = self.get_success(self.store.get_events(event_ids))
+
+        # Sanity check that we got the events back
+        self.assertIncludes(fetched_event_map.keys(), event_ids, exact=True)
+
+
 class DatabaseOutageTestCase(unittest.HomeserverTestCase):
     """Test event fetching during a database outage."""
 
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 506d981ce6..49dc973a36 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -112,6 +112,24 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase):
             {(1, "user1", "hello"), (2, "user2", "bleb")},
         )
 
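+        # Upsert an existing key with no non-key values; the existing row should be
+        # left unchanged.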
+        self.get_success(
+            self.storage.db_pool.runInteraction(
+                "test",
+                self.storage.db_pool.simple_upsert_many_txn,
+                self.table_name,
+                key_names=key_names,
+                key_values=[[2, "user2"]],
+                value_names=[],
+                value_values=[],
+            )
+        )
+
+        # Check results are what we expect
+        self.assertEqual(
+            set(self._dump_table_to_tuple()),
+            {(1, "user1", "hello"), (2, "user2", "bleb")},
+        )
+
     def test_simple_update_many(self) -> None:
         """
         simple_update_many performs many updates at once.
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index 0a7c4c9421..cb3d8e19bc 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -19,6 +19,7 @@
 #
 #
 
+import logging
 from typing import List, Optional
 
 from twisted.test.proto_helpers import MemoryReactor
@@ -35,6 +36,8 @@ from synapse.util import Clock
 
 from tests.unittest import HomeserverTestCase
 
+logger = logging.getLogger(__name__)
+
 
 class ExtremPruneTestCase(HomeserverTestCase):
     servlets = [
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 882f3bbbdc..330fea0e62 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -19,20 +19,29 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
+import logging
 from typing import List, Optional, Tuple, cast
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.api.constants import Membership
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership
+from synapse.api.room_versions import RoomVersions
+from synapse.rest import admin
 from synapse.rest.admin import register_servlets_for_client_rest_resource
-from synapse.rest.client import login, room
+from synapse.rest.client import knock, login, room
 from synapse.server import HomeServer
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
 from synapse.types import UserID, create_requester
 from synapse.util import Clock
 
 from tests import unittest
 from tests.server import TestHomeServer
 from tests.test_utils import event_injection
+from tests.test_utils.event_injection import create_event
+from tests.unittest import skip_unless
+
+logger = logging.getLogger(__name__)
 
 
 class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
@@ -46,6 +55,10 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # We can't test the RoomMemberStore on its own without the other event
         # storage logic
         self.store = hs.get_datastores().main
+        self.state_handler = self.hs.get_state_handler()
+        persistence = self.hs.get_storage_controllers().persistence
+        assert persistence is not None
+        self.persistence = persistence
 
         self.u_alice = self.register_user("alice", "pass")
         self.t_alice = self.login("alice", "pass")
@@ -212,31 +225,557 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         )
 
     def test_join_locally_forgotten_room(self) -> None:
-        """Tests if a user joins a forgotten room the room is not forgotten anymore."""
-        self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
-        self.assertFalse(
-            self.get_success(self.store.is_locally_forgotten_room(self.room))
-        )
+        """
+        Tests if a user joins a forgotten room, the room is not forgotten anymore.
 
-        # after leaving and forget the room, it is forgotten
-        self.get_success(
-            event_injection.inject_member_event(
-                self.hs, self.room, self.u_alice, "leave"
+        Since a room can't be re-joined if everyone has left, this can only happen with
+        a room with remote users in it.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote room
+        creator = "@user:other"
+        room_id = "!foo:other"
+        room_version = RoomVersions.V10
+        shared_kwargs = {
+            "room_id": room_id,
+            "room_version": room_version.identifier,
+        }
+
+        create_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[],
+                type=EventTypes.Create,
+                state_key="",
+                content={
+                    # The `ROOM_CREATOR` field could be removed if we used a room
+                    # version > 10 (in favor of relying on `sender`)
+                    EventContentFields.ROOM_CREATOR: creator,
+                    EventContentFields.ROOM_VERSION: room_version.identifier,
+                },
+                sender=creator,
+                **shared_kwargs,
             )
         )
-        self.get_success(self.store.forget(self.u_alice, self.room))
-        self.assertTrue(
-            self.get_success(self.store.is_locally_forgotten_room(self.room))
-        )
-
-        # after rejoin the room is not forgotten anymore
-        self.get_success(
-            event_injection.inject_member_event(
-                self.hs, self.room, self.u_alice, "join"
+        creator_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[create_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=creator,
+                content={"membership": Membership.JOIN},
+                sender=creator,
+                **shared_kwargs,
             )
         )
+
+        remote_events_and_contexts = [
+            create_tuple,
+            creator_tuple,
+        ]
+
+        # Ensure the local HS knows the room version
+        self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+        # Persist these events as backfilled events.
+        for event, context in remote_events_and_contexts:
+            self.get_success(
+                self.persistence.persist_event(event, context, backfilled=True)
+            )
+
+        # Now we join the local user to the room. We want to make this feel as close to
+        # the real `process_remote_join()` as possible but we'd like to avoid some of
+        # the auth checks that would be done in the real code.
+        #
+        # FIXME: The test was originally written using this less-real
+        # `persist_event(...)` shortcut but it would be nice to use the real remote join
+        # process in a `FederatingHomeserverTestCase`.
+        flawed_join_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[creator_tuple[0].event_id],
+                # This doesn't correctly create an `EventContext` that includes both of
+                # these state events. I assume it's because we're working on our local
+                # homeserver, which has the remote state set as `outlier`. We have to
+                # create our own EventContext below to get this right.
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.JOIN},
+                sender=user1_id,
+                **shared_kwargs,
+            )
+        )
+        # We have to create our own context to get the state set correctly. If we use
+        # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+        # table will only have the join event in it which should never happen in our
+        # real server.
+        join_event = flawed_join_tuple[0]
+        join_context = self.get_success(
+            self.state_handler.compute_event_context(
+                join_event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id for e in [create_tuple[0]]
+                },
+                partial_state=False,
+            )
+        )
+        self.get_success(self.persistence.persist_event(join_event, join_context))
+
+        # The room shouldn't be forgotten because the local user just joined
         self.assertFalse(
-            self.get_success(self.store.is_locally_forgotten_room(self.room))
+            self.get_success(self.store.is_locally_forgotten_room(room_id))
+        )
+
+        # After all of the local users (there is only user1) leave and forget the
+        # room, it is forgotten
+        user1_leave_response = self.helper.leave(room_id, user1_id, tok=user1_tok)
+        user1_leave_event = self.get_success(
+            self.store.get_event(user1_leave_response["event_id"])
+        )
+        self.get_success(self.store.forget(user1_id, room_id))
+        self.assertTrue(self.get_success(self.store.is_locally_forgotten_room(room_id)))
+
+        # Join the local user to the room (again). We want to make this feel as close to
+        # the real `process_remote_join()` as possible but we'd like to avoid some of
+        # the auth checks that would be done in the real code.
+        #
+        # FIXME: The test was originally written using this less-real
+        # `event_injection.inject_member_event(...)` shortcut but it would be nice to
+        # use the real remote join process in a `FederatingHomeserverTestCase`.
+        flawed_join_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[user1_leave_response["event_id"]],
+                # This doesn't correctly create an `EventContext` that includes both of
+                # these state events. I assume it's because we're working on our local
+                # homeserver, which has the remote state set as `outlier`. We have to
+                # create our own EventContext below to get this right.
+                auth_event_ids=[
+                    create_tuple[0].event_id,
+                    user1_leave_response["event_id"],
+                ],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.JOIN},
+                sender=user1_id,
+                **shared_kwargs,
+            )
+        )
+        # We have to create our own context to get the state set correctly. If we use
+        # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+        # table will only have the join event in it which should never happen in our
+        # real server.
+        join_event = flawed_join_tuple[0]
+        join_context = self.get_success(
+            self.state_handler.compute_event_context(
+                join_event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id
+                    for e in [create_tuple[0], user1_leave_event]
+                },
+                partial_state=False,
+            )
+        )
+        self.get_success(self.persistence.persist_event(join_event, join_context))
+
+        # After the local user rejoins the remote room, it isn't forgotten anymore
+        self.assertFalse(
+            self.get_success(self.store.is_locally_forgotten_room(room_id))
+        )
+
+
+class RoomSummaryTestCase(unittest.HomeserverTestCase):
+    """
+    Test `/sync` room summary related logic like `get_room_summary(...)` and
+    `extract_heroes_from_room_summary(...)`
+    """
+
+    servlets = [
+        admin.register_servlets,
+        knock.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+        self.store = self.hs.get_datastores().main
+
+    def _assert_member_summary(
+        self,
+        actual_member_summary: MemberSummary,
+        expected_member_list: List[str],
+        *,
+        expected_member_count: Optional[int] = None,
+    ) -> None:
+        """
+        Assert that the `MemberSummary` object has the expected members.
+        """
+        self.assertListEqual(
+            [
+                user_id
+                for user_id, _membership_event_id in actual_member_summary.members
+            ],
+            expected_member_list,
+        )
+        self.assertEqual(
+            actual_member_summary.count,
+            (
+                expected_member_count
+                if expected_member_count is not None
+                else len(expected_member_list)
+            ),
+        )
+
+    def test_get_room_summary_membership(self) -> None:
+        """
+        Test that `get_room_summary(...)` gets every kind of membership when there
+        aren't that many members in the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+
+        # Setup a room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 is invited by user1
+        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
+
+        # User4 leaves
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # User5 joins
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+        empty_ms = MemberSummary([], 0)
+
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.JOIN, empty_ms),
+            [user1_id, user5_id],
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.BAN, empty_ms), [user2_id]
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.KNOCK, empty_ms),
+            [
+                # No one knocked
+            ],
+        )
+
+    def test_get_room_summary_membership_order(self) -> None:
+        """
+        Test that `get_room_summary(...)` fills its limit of 6 members in this order:
+        joins -> invites -> leave -> everything else (bans/knocks)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+        user6_id = self.register_user("user6", "pass")
+        user6_tok = self.login(user6_id, "pass")
+        user7_id = self.register_user("user7", "pass")
+        user7_tok = self.login(user7_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # We expect the order to be joins -> invites -> leave -> bans, so set up the
+        # users *NOT* in that order to make sure we're actually sorting them.
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 is invited by user1
+        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
+
+        # User4 leaves
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # User5, User6, and User7 join
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+        self.helper.join(room_id, user6_id, tok=user6_tok)
+        self.helper.join(room_id, user7_id, tok=user7_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+        empty_ms = MemberSummary([], 0)
+
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.JOIN, empty_ms),
+            [user1_id, user5_id, user6_id, user7_id],
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.BAN, empty_ms),
+            [
+                # The banned user is not in the summary because the summary can only fit
+                # 6 members and prefers everything else before bans
+                #
+                # user2_id
+            ],
+            # But we still see the count of banned users
+            expected_member_count=1,
+        )
+        self._assert_member_summary(
+            room_membership_summary.get(Membership.KNOCK, empty_ms),
+            [
+                # No one knocked
+            ],
+        )
+
+    def test_extract_heroes_from_room_summary_excludes_self(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` does not include the user
+        itself.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 joins
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        # We first ask from the perspective of a random fake user
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Make sure user1 is in the room (ensure our test setup is correct)
+        self.assertListEqual(hero_user_ids, [user1_id, user2_id])
+
+        # Now, we ask for the room summary from the perspective of user1
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me=user1_id
+        )
+
+        # User1 should not be included in the list of heroes because they are the one
+        # asking
+        self.assertListEqual(hero_user_ids, [user2_id])
+
+    def test_extract_heroes_from_room_summary_first_five_joins(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` returns the first 5 joins.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+        user6_id = self.register_user("user6", "pass")
+        user6_tok = self.login(user6_id, "pass")
+        user7_id = self.register_user("user7", "pass")
+        user7_tok = self.login(user7_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 through User7 join
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+        self.helper.join(room_id, user6_id, tok=user6_tok)
+        self.helper.join(room_id, user7_id, tok=user7_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # First 5 users to join the room
+        self.assertListEqual(
+            hero_user_ids, [user1_id, user2_id, user3_id, user4_id, user5_id]
+        )
+
+    def test_extract_heroes_from_room_summary_membership_order(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` prefers joins/invites over
+        everything else.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        user4_tok = self.login(user4_id, "pass")
+        user5_id = self.register_user("user5", "pass")
+        user5_tok = self.login(user5_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # We expect the order to be joins -> invites -> leave -> bans, so set up the
+        # users *NOT* in that order to make sure we're actually sorting them.
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 is invited by user1
+        self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
+
+        # User4 leaves
+        self.helper.join(room_id, user4_id, tok=user4_tok)
+        self.helper.leave(room_id, user4_id, tok=user4_tok)
+
+        # User5 joins
+        self.helper.join(room_id, user5_id, tok=user5_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Prefer joins -> invites, over everything else
+        self.assertListEqual(
+            hero_user_ids,
+            [
+                # The joins
+                user1_id,
+                user5_id,
+                # The invites
+                user3_id,
+            ],
+        )
+
+    @skip_unless(
+        False,
+        "Test is not possible because when everyone leaves the room, "
+        + "the server is `no_longer_in_room` and we don't have any `current_state_events` to query",
+    )
+    def test_extract_heroes_from_room_summary_fallback_leave_ban(self) -> None:
+        """
+        Test that `extract_heroes_from_room_summary(...)` falls back to leave/ban if
+        there aren't any joins/invites.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        # Setup the room (user1 is the creator and is joined to the room)
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # User2 is banned
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+        self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
+
+        # User3 leaves
+        self.helper.join(room_id, user3_id, tok=user3_tok)
+        self.helper.leave(room_id, user3_id, tok=user3_tok)
+
+        # User1 leaves (we're doing this last because they're the room creator)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # Fallback to people who left -> banned
+        self.assertListEqual(
+            hero_user_ids,
+            [user3_id, user1_id, user2_id],
+        )
+
+    def test_extract_heroes_from_room_summary_excludes_knocks(self) -> None:
+        """
+        People who knock on the room have (potentially) never been in the room before
+        and are total outsiders. Plus, the spec doesn't mention them at all for heroes.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Setup the knock room (user1 is the creator and is joined to the room)
+        knock_room_id = self.helper.create_room_as(
+            user1_id, tok=user1_tok, room_version=RoomVersions.V7.identifier
+        )
+        self.helper.send_state(
+            knock_room_id,
+            EventTypes.JoinRules,
+            {"join_rule": JoinRules.KNOCK},
+            tok=user1_tok,
+        )
+
+        # User2 knocks on the room
+        knock_channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/knock/%s" % (knock_room_id,),
+            b"{}",
+            user2_tok,
+        )
+        self.assertEqual(knock_channel.code, 200, knock_channel.result)
+
+        room_membership_summary = self.get_success(
+            self.store.get_room_summary(knock_room_id)
+        )
+
+        hero_user_ids = extract_heroes_from_room_summary(
+            room_membership_summary, me="@fakeuser"
+        )
+
+        # user1 is the creator and is joined to the room (should show up as a hero)
+        # user2 is knocking on the room (should not show up as a hero)
+        self.assertListEqual(
+            hero_user_ids,
+            [user1_id],
         )
 
 
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
new file mode 100644
index 0000000000..35917505a4
--- /dev/null
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -0,0 +1,5016 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from typing import Dict, List, Optional, Tuple, cast
+
+import attr
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
+from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main.events import DeltaState
+from synapse.storage.databases.main.events_bg_updates import (
+    _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+    _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+)
+from synapse.types import create_requester
+from synapse.types.storage import _BackgroundUpdates
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncJoinedRoomResult:
+    room_id: str
+    # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+    # `event.internal_metadata.stream_ordering` is marked optional because it only
+    # exists for persisted events, but in the context of these tests we're only working
+    # with persisted events and we're making comparisons, so we will find any mismatch.
+    event_stream_ordering: Optional[int]
+    bump_stamp: Optional[int]
+    room_type: Optional[str]
+    room_name: Optional[str]
+    is_encrypted: bool
+    tombstone_successor_room_id: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncMembershipSnapshotResult:
+    room_id: str
+    user_id: str
+    sender: str
+    membership_event_id: str
+    membership: str
+    # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+    # `event.internal_metadata.stream_ordering` is marked optional because it only
+    # exists for persisted events, but in the context of these tests we're only working
+    # with persisted events and we're making comparisons, so we will find any mismatch.
+    event_stream_ordering: Optional[int]
+    has_known_state: bool
+    room_type: Optional[str]
+    room_name: Optional[str]
+    is_encrypted: bool
+    tombstone_successor_room_id: Optional[str]
+    # Make this default to "not forgotten" because it doesn't apply to many tests and we
+    # don't want to force all of the tests to deal with it.
+    forgotten: bool = False
+
+
+class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
+    """
+    Helpers to deal with testing that the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+    populated correctly.
+    """
+
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
+        persist_events_store = self.hs.get_datastores().persist_events
+        assert persist_events_store is not None
+        self.persist_events_store = persist_events_store
+
+        persist_controller = self.hs.get_storage_controllers().persistence
+        assert persist_controller is not None
+        self.persist_controller = persist_controller
+
+        self.state_handler = self.hs.get_state_handler()
+
+    def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]:
+        """
+        Return the rows from the `sliding_sync_joined_rooms` table.
+
+        Returns:
+            Mapping from room_id to _SlidingSyncJoinedRoomResult.
+        """
+        rows = cast(
+            List[Tuple[str, int, int, str, str, bool, str]],
+            self.get_success(
+                self.store.db_pool.simple_select_list(
+                    "sliding_sync_joined_rooms",
+                    None,
+                    retcols=(
+                        "room_id",
+                        "event_stream_ordering",
+                        "bump_stamp",
+                        "room_type",
+                        "room_name",
+                        "is_encrypted",
+                        "tombstone_successor_room_id",
+                    ),
+                ),
+            ),
+        )
+
+        return {
+            row[0]: _SlidingSyncJoinedRoomResult(
+                room_id=row[0],
+                event_stream_ordering=row[1],
+                bump_stamp=row[2],
+                room_type=row[3],
+                room_name=row[4],
+                is_encrypted=bool(row[5]),
+                tombstone_successor_room_id=row[6],
+            )
+            for row in rows
+        }
+
+    def _get_sliding_sync_membership_snapshots(
+        self,
+    ) -> Dict[Tuple[str, str], _SlidingSyncMembershipSnapshotResult]:
+        """
+        Return the rows from the `sliding_sync_membership_snapshots` table.
+
+        Returns:
+            Mapping from (room_id, user_id) to _SlidingSyncMembershipSnapshotResult.
+        """
+        rows = cast(
+            List[Tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]],
+            self.get_success(
+                self.store.db_pool.simple_select_list(
+                    "sliding_sync_membership_snapshots",
+                    None,
+                    retcols=(
+                        "room_id",
+                        "user_id",
+                        "sender",
+                        "membership_event_id",
+                        "membership",
+                        "forgotten",
+                        "event_stream_ordering",
+                        "has_known_state",
+                        "room_type",
+                        "room_name",
+                        "is_encrypted",
+                        "tombstone_successor_room_id",
+                    ),
+                ),
+            ),
+        )
+
+        return {
+            (row[0], row[1]): _SlidingSyncMembershipSnapshotResult(
+                room_id=row[0],
+                user_id=row[1],
+                sender=row[2],
+                membership_event_id=row[3],
+                membership=row[4],
+                forgotten=bool(row[5]),
+                event_stream_ordering=row[6],
+                has_known_state=bool(row[7]),
+                room_type=row[8],
+                room_name=row[9],
+                is_encrypted=bool(row[10]),
+                tombstone_successor_room_id=row[11],
+            )
+            for row in rows
+        }
+
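+    # Incremented for each fake remote invite so that every call to
+    # `_create_remote_invite_room_for_user` generates a unique remote room ID.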
+    _remote_invite_count: int = 0
+
+    def _create_remote_invite_room_for_user(
+        self,
+        invitee_user_id: str,
+        unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
+    ) -> Tuple[str, EventBase]:
+        """
+        Create a fake invite for a remote room and persist it.
+
+        We don't have any state for these kinds of rooms and can only rely on the
+        stripped state included in the unsigned portion of the invite event to identify
+        the room.
+
+        Args:
+            invitee_user_id: The person being invited
+            unsigned_invite_room_state: List of stripped state events to assist the
+                receiver in identifying the room.
+
+        Returns:
+            The room ID of the remote invite room and the persisted remote invite event.
+        """
+        invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
+
+        invite_event_dict = {
+            "room_id": invite_room_id,
+            "sender": "@inviter:remote_server",
+            "state_key": invitee_user_id,
+            "depth": 1,
+            "origin_server_ts": 1,
+            "type": EventTypes.Member,
+            "content": {"membership": Membership.INVITE},
+            "auth_events": [],
+            "prev_events": [],
+        }
+        if unsigned_invite_room_state is not None:
+            serialized_stripped_state_events = []
+            for stripped_event in unsigned_invite_room_state:
+                serialized_stripped_state_events.append(
+                    {
+                        "type": stripped_event.type,
+                        "state_key": stripped_event.state_key,
+                        "sender": stripped_event.sender,
+                        "content": stripped_event.content,
+                    }
+                )
+
+            invite_event_dict["unsigned"] = {
+                "invite_room_state": serialized_stripped_state_events
+            }
+
+        invite_event = make_event_from_dict(
+            invite_event_dict,
+            room_version=RoomVersions.V10,
+        )
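+        # We only have the invite event itself (no other state from the remote room),
+        # so mark it as an out-of-band membership and an outlier, just like a real
+        # remote invite received over federation would be.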
+        invite_event.internal_metadata.outlier = True
+        invite_event.internal_metadata.out_of_band_membership = True
+
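+        # Make sure the local homeserver knows about the remote room (and its room
+        # version) before we persist the outlier membership event.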
+        self.get_success(
+            self.store.maybe_store_room_on_outlier_membership(
+                room_id=invite_room_id, room_version=invite_event.room_version
+            )
+        )
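+        # Outlier events have no state to compute, so persist with an "outlier" context.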
+        context = EventContext.for_outlier(self.hs.get_storage_controllers())
+        persisted_event, _, _ = self.get_success(
+            self.persist_controller.persist_event(invite_event, context)
+        )
+
+        self._remote_invite_count += 1
+
+        return invite_room_id, persisted_event
+
+    def _retract_remote_invite_for_user(
+        self,
+        user_id: str,
+        remote_room_id: str,
+    ) -> EventBase:
+        """
+        Create a fake invite retraction for a remote room and persist it.
+
+        Retracting an invite just means the person is no longer invited to the room.
+        This is done by someone with proper power levels kicking the user from the room.
+        A kick shows up as a leave event for a given person with a different `sender`.
+
+        Args:
+            user_id: The person who was invited and whose invite we're going to
+                retract.
+            remote_room_id: The room ID that the invite was for.
+
+        Returns:
+            The persisted leave (kick) event.
+        """
+
+        kick_event_dict = {
+            "room_id": remote_room_id,
+            "sender": "@inviter:remote_server",
+            "state_key": user_id,
+            "depth": 1,
+            "origin_server_ts": 1,
+            "type": EventTypes.Member,
+            "content": {"membership": Membership.LEAVE},
+            "auth_events": [],
+            "prev_events": [],
+        }
+
+        kick_event = make_event_from_dict(
+            kick_event_dict,
+            room_version=RoomVersions.V10,
+        )
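+        # As with the invite, this kick is an out-of-band membership that we only know
+        # about as an outlier.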
+        kick_event.internal_metadata.outlier = True
+        kick_event.internal_metadata.out_of_band_membership = True
+
+        self.get_success(
+            self.store.maybe_store_room_on_outlier_membership(
+                room_id=remote_room_id, room_version=kick_event.room_version
+            )
+        )
+        context = EventContext.for_outlier(self.hs.get_storage_controllers())
+        persisted_event, _, _ = self.get_success(
+            self.persist_controller.persist_event(kick_event, context)
+        )
+
+        return persisted_event
+
+
+class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
+    """
+    Tests to make sure the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+    populated and updated correctly as new events are sent.
+    """
+
+    def test_joined_room_with_no_info(self) -> None:
+        """
+        Test that a joined room without a room type, encryption, or name shows up in
+        `sliding_sync_joined_rooms`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # History visibility just happens to be the last event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomHistoryVisibility, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_joined_room_with_info(self) -> None:
+        """
+        Test that a joined, encrypted room with a name shows up in
+        `sliding_sync_joined_rooms`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id1,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                # Even though this room does have a name, is encrypted, and has a
+                # tombstone, user2 is the room creator and joined at room creation
+                # time, before this state was set.
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_joined_space_room_with_info(self) -> None:
+        """
+        Test that a joined space room with a name shows up in
+        `sliding_sync_joined_rooms`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        user1_join_response = self.helper.join(space_room_id, user1_id, tok=user1_tok)
+        user1_join_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_join_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {space_room_id},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[space_room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=space_room_id,
+                event_stream_ordering=user1_join_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (space_room_id, user1_id),
+                (space_room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                # Even though this room does have a name, user2 is the room creator and
+                # joined at room creation time, before this state was set.
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_joined_room_with_state_updated(self) -> None:
+        """
+        Test that state-derived info in `sliding_sync_joined_rooms` is updated when the
+        current state is updated.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        user1_join_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_join_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                event_stream_ordering=user1_join_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+
+        # Update the room name
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room was renamed"},
+            tok=user2_tok,
+        )
+        # Encrypt the room
+        encrypt_room_response = self.helper.send_state(
+            room_id1,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        encrypt_room_event_pos = self.get_success(
+            self.store.get_position_for_event(encrypt_room_response["event_id"])
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        # Make sure we see the new room name
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                event_stream_ordering=encrypt_room_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room was renamed",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_joined_room_is_bumped(self) -> None:
+        """
+        Test that `event_stream_ordering` and `bump_stamp` are updated when a new bump
+        event is sent (`sliding_sync_joined_rooms`).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        user1_join_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_join_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                event_stream_ordering=user1_join_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id1,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name="my super duper room",
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id1,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            user2_snapshot,
+        )
+
+        # Send a new message to bump the room
+        event_response = self.helper.send(room_id1, "some message", tok=user1_tok)
+        event_pos = self.get_success(
+            self.store.get_position_for_event(event_response["event_id"])
+        )
+
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        # Make sure we see the updated `event_stream_ordering` and `bump_stamp`
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # Updated `event_stream_ordering`
+                event_stream_ordering=event_pos.stream,
+                # And since the event was a bump event, the `bump_stamp` should be updated
+                bump_stamp=event_pos.stream,
+                # The state is still the same (it didn't change)
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            user1_snapshot,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_joined_room_bump_stamp_backfill(self) -> None:
+        """
+        Test that `bump_stamp` ignores backfilled events, i.e. events with a
+        negative stream ordering.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote room
+        creator = "@user:other"
+        room_id = "!foo:other"
+        room_version = RoomVersions.V10
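+        # Common fields shared by every remote event we create below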
+        shared_kwargs = {
+            "room_id": room_id,
+            "room_version": room_version.identifier,
+        }
+
+        create_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[],
+                type=EventTypes.Create,
+                state_key="",
+                content={
+                    # The `ROOM_CREATOR` field could be removed if we used a room
+                    # version > 10 (in favor of relying on `sender`)
+                    EventContentFields.ROOM_CREATOR: creator,
+                    EventContentFields.ROOM_VERSION: room_version.identifier,
+                },
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        creator_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[create_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=creator,
+                content={"membership": Membership.JOIN},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        room_name_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[creator_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+                type=EventTypes.Name,
+                state_key="",
+                content={
+                    EventContentFields.ROOM_NAME: "my super duper room",
+                },
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        # We add a message event as a valid "bump type"
+        msg_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[room_name_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+                type=EventTypes.Message,
+                content={"body": "foo", "msgtype": "m.text"},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+        invite_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[msg_tuple[0].event_id],
+                auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.INVITE},
+                sender=creator,
+                **shared_kwargs,
+            )
+        )
+
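+        # The full chain of remote events to backfill, in topological order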
+        remote_events_and_contexts = [
+            create_tuple,
+            creator_tuple,
+            room_name_tuple,
+            msg_tuple,
+            invite_tuple,
+        ]
+
+        # Ensure the local HS knows the room version
+        self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+        # Persist these events as backfilled events.
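+        # (Backfilled events get negative stream orderings, which is what the docstring
+        # above is exercising.)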
+        for event, context in remote_events_and_contexts:
+            self.get_success(
+                self.persist_controller.persist_event(event, context, backfilled=True)
+            )
+
+        # Now we join the local user to the room. We want to make this feel as close to
+        # the real `process_remote_join()` as possible but we'd like to avoid some of
+        # the auth checks that would be done in the real code.
+        #
+        # FIXME: The test was originally written using this less-real
+        # `persist_event(...)` shortcut but it would be nice to use the real remote join
+        # process in a `FederatingHomeserverTestCase`.
+        flawed_join_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[invite_tuple[0].event_id],
+                # This doesn't correctly create an `EventContext` that includes
+                # both of these state events. I assume it's because we're working on our
+                # local homeserver which has the remote state set as `outlier`. We have
+                # to create our own EventContext below to get this right.
+                auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": Membership.JOIN},
+                sender=user1_id,
+                **shared_kwargs,
+            )
+        )
+        # We have to create our own context to get the state set correctly. If we use
+        # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+        # table will only have the join event in it which should never happen in our
+        # real server.
+        join_event = flawed_join_tuple[0]
+        join_context = self.get_success(
+            self.state_handler.compute_event_context(
+                join_event,
+                state_ids_before_event={
+                    (e.type, e.state_key): e.event_id
+                    for e in [create_tuple[0], invite_tuple[0], room_name_tuple[0]]
+                },
+                partial_state=False,
+            )
+        )
+        join_event, _join_event_pos, _room_token = self.get_success(
+            self.persist_controller.persist_event(join_event, join_context)
+        )
+
+        # Make sure the tables are populated correctly
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be the last event in the room (the join membership)
+                event_stream_ordering=join_event.internal_metadata.stream_ordering,
+                # Since all of the bump events are backfilled, the `bump_stamp` should
+                # still be `None` (and we will fall back to the user's membership event
+                # position in the Sliding Sync API).
+                bump_stamp=None,
+                room_type=None,
+                # We still pick up the state of the room even if it's backfilled
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=join_event.event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=join_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    @parameterized.expand(
+        # Test both an insert and an upsert into the
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables to
+        # exercise more possibilities of things going wrong.
+        [
+            ("insert", True),
+            ("upsert", False),
+        ]
+    )
+    def test_joined_room_outlier_and_deoutlier(
+        self, description: str, should_insert: bool
+    ) -> None:
+        """
+        This is a regression test.
+
+        This simulates the case where an event is first persisted as an outlier
+        (like a remote invite) and then later persisted again to de-outlier it. The
+        first time, the outlier is persisted with one `stream_ordering`, but when it is
+        persisted again and de-outliered, it is assigned a different `stream_ordering`
+        that won't end up being used. Since we call
+        `_calculate_sliding_sync_table_changes()` before `_update_outliers_txn()` (which
+        fixes this discrepancy by always using the `stream_ordering` from the first time
+        the event was persisted), make sure we're not using unreliable `stream_ordering`
+        values that will cause `FOREIGN KEY constraint failed` errors in the
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_version = RoomVersions.V10
+        room_id = self.helper.create_room_as(
+            user2_id, tok=user2_tok, room_version=room_version.identifier
+        )
+
+        if should_insert:
+            # Clear these out so we always insert
+            self.get_success(
+                self.store.db_pool.simple_delete(
+                    table="sliding_sync_joined_rooms",
+                    keyvalues={"room_id": room_id},
+                    desc="TODO",
+                )
+            )
+            self.get_success(
+                self.store.db_pool.simple_delete(
+                    table="sliding_sync_membership_snapshots",
+                    keyvalues={"room_id": room_id},
+                    desc="TODO",
+                )
+            )
+
+        # Create a membership event (which triggers an insert into
+        # `sliding_sync_membership_snapshots`)
+        membership_event_dict = {
+            "type": EventTypes.Member,
+            "state_key": user1_id,
+            "sender": user1_id,
+            "room_id": room_id,
+            "content": {EventContentFields.MEMBERSHIP: Membership.JOIN},
+        }
+        # Create a relevant state event (which triggers an insert into
+        # `sliding_sync_joined_rooms`)
+        state_event_dict = {
+            "type": EventTypes.Name,
+            "state_key": "",
+            "sender": user2_id,
+            "room_id": room_id,
+            "content": {EventContentFields.ROOM_NAME: "my super room"},
+        }
+        event_dicts_to_persist = [
+            membership_event_dict,
+            state_event_dict,
+        ]
+
+        for event_dict in event_dicts_to_persist:
+            events_to_persist = []
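+            # The outlier version is appended first and the de-outliered version second;
+            # they are then persisted in that order below.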
+
+            # Create the event as an outlier
+            (
+                event,
+                unpersisted_context,
+            ) = self.get_success(
+                self.hs.get_event_creation_handler().create_event(
+                    requester=create_requester(user1_id),
+                    event_dict=event_dict,
+                    outlier=True,
+                )
+            )
+            # FIXME: Should we use an `EventContext.for_outlier(...)` here?
+            # Doesn't seem to matter for this test.
+            context = self.get_success(unpersisted_context.persist(event))
+            events_to_persist.append((event, context))
+
+            # Create the event again but as a non-outlier. This will de-outlier the event
+            # when we persist it.
+            (
+                event,
+                unpersisted_context,
+            ) = self.get_success(
+                self.hs.get_event_creation_handler().create_event(
+                    requester=create_requester(user1_id),
+                    event_dict=event_dict,
+                    outlier=False,
+                )
+            )
+            context = self.get_success(unpersisted_context.persist(event))
+            events_to_persist.append((event, context))
+
+            for event, context in events_to_persist:
+                self.get_success(
+                    self.persist_controller.persist_event(
+                        event,
+                        context,
+                    )
+                )
+
+        # We're just testing that it does not explode
+
+    def test_joined_room_meta_state_reset(self) -> None:
+        """
+        Test that a state reset on the room name is reflected in the
+        `sliding_sync_joined_rooms` table.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Make sure we see the new room name
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name="my super duper room",
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined (no room
+        # name when the room creator joined)
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+        # Mock a state reset removing the room name state from the current state
+        message_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[state_map[(EventTypes.Name, "")].event_id],
+                auth_event_ids=[
+                    state_map[(EventTypes.Create, "")].event_id,
+                    state_map[(EventTypes.Member, user1_id)].event_id,
+                ],
+                type=EventTypes.Message,
+                content={"body": "foo", "msgtype": "m.text"},
+                sender=user1_id,
+                room_id=room_id,
+                room_version=RoomVersions.V10.identifier,
+            )
+        )
+        event_chunk = [message_tuple]
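+        # Use the low-level `_persist_events_and_state_updates(...)` directly so we can
+        # hand it a `DeltaState` that removes the room name from the current state (the
+        # "state reset").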
+        self.get_success(
+            self.persist_events_store._persist_events_and_state_updates(
+                room_id,
+                event_chunk,
+                state_delta_for_room=DeltaState(
+                    # This is the state reset part. We're removing the room name state.
+                    to_delete=[(EventTypes.Name, "")],
+                    to_insert={},
+                ),
+                new_forward_extremities={message_tuple[0].event_id},
+                use_negative_stream_ordering=False,
+                inhibit_local_membership_updates=False,
+                new_event_links={},
+            )
+        )
+
+        # Make sure the state reset is reflected in the `sliding_sync_joined_rooms` table
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=message_tuple[
+                    0
+                ].internal_metadata.stream_ordering,
+                bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+                room_type=None,
+                # This was state reset back to None
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # State reset shouldn't be reflected in the `sliding_sync_membership_snapshots`
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Snapshots haven't changed
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_joined_room_fully_insert_on_state_update(self) -> None:
+        """
+        Test that when an existing room updates its state and we don't have a
+        corresponding row in `sliding_sync_joined_rooms` yet, we fully-insert the row
+        even though only a tiny piece of state changed.
+
+        FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+        foreground update for
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+        https://github.com/element-hq/synapse/issues/17623)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+
+        # Clean up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate an existing room (from before we even
+        # added the sliding sync tables) not being in the `sliding_sync_joined_rooms`
+        # table yet.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted the row in
+        # preparation for the test.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Encrypt the room
+        self.helper.send_state(
+            room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # The room should now be in the `sliding_sync_joined_rooms` table
+        # (fully-inserted with all of the state values).
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomEncryption, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_joined_room_nothing_if_not_in_table_when_bumped(self) -> None:
+        """
+        Test a new message being sent in an existing room when we don't have a
+        corresponding row in `sliding_sync_joined_rooms` yet; either nothing should
+        happen or we should fully-insert the row. We currently do nothing.
+
+        FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+        foreground update for
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+        https://github.com/element-hq/synapse/issues/17623)
+        """
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Clean up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate an existing room (from before we even
+        # added the sliding sync tables) not being in the `sliding_sync_joined_rooms`
+        # table yet.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted the row in
+        # preparation for the test.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Send a new message to bump the room
+        self.helper.send(room_id, "some message", tok=user1_tok)
+
+        # Either nothing should happen or we should fully-insert the row. We currently
+        # do nothing for non-state events.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+    def test_non_join_space_room_with_info(self) -> None:
+        """
+        Test that a user who was invited shows up in `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user2_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user2_tok,
+        )
+
+        # User1 is invited to the room
+        user1_invited_response = self.helper.invite(
+            space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_invited_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_invited_response["event_id"])
+        )
+
+        # Update the room name after we are invited just to make sure
+        # we don't update non-join memberships when the room name changes.
+        rename_response = self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space was renamed"},
+            tok=user2_tok,
+        )
+        rename_event_pos = self.get_success(
+            self.store.get_position_for_event(rename_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+
+        # User2 is still joined to the room so we should still have an entry in the
+        # `sliding_sync_joined_rooms` table.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {space_room_id},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[space_room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=space_room_id,
+                event_stream_ordering=rename_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space was renamed",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (space_room_id, user1_id),
+                (space_room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user was invited
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invited_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=user1_invited_event_pos.stream,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_invite_ban(self) -> None:
+        """
+        Test that users with an invite/ban membership in a room show up in
+        `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        user3_tok = self.login(user3_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 is invited to the room
+        user1_invited_response = self.helper.invite(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_invited_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_invited_response["event_id"])
+        )
+
+        # User3 joins the room
+        self.helper.join(room_id1, user3_id, tok=user3_tok)
+        # User3 is banned from the room
+        user3_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user3_id, tok=user2_tok
+        )
+        user3_ban_event_pos = self.get_success(
+            self.store.get_position_for_event(user3_ban_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # User2 is still joined to the room so we should still have an entry
+        # in the `sliding_sync_joined_rooms` table.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                event_stream_ordering=user3_ban_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+                (room_id1, user3_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user was invited
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invited_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=user1_invited_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user was banned
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user3_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user3_id,
+                sender=user2_id,
+                membership_event_id=user3_ban_response["event_id"],
+                membership=Membership.BAN,
+                event_stream_ordering=user3_ban_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_reject_invite_empty_room(self) -> None:
+        """
+        In a room where no one is joined (`no_longer_in_room`), test rejecting an invite.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 is invited to the room
+        self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+        # User2 leaves the room
+        user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok)
+        user2_leave_event_pos = self.get_success(
+            self.store.get_position_for_event(user2_leave_response["event_id"])
+        )
+
+        # User1 rejects the invite
+        user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        user1_leave_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_leave_response["event_id"])
+        )
+
+        # No one is joined to the room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user left
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=user1_leave_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=user1_leave_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user left
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=user2_leave_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=user2_leave_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_changing(self) -> None:
+        """
+        Test that the latest snapshot evolves when membership changes (`sliding_sync_membership_snapshots`).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 is invited to the room
+        # ======================================================
+        user1_invited_response = self.helper.invite(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_invited_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_invited_response["event_id"])
+        )
+
+        # Update the room name after the user was invited
+        room_name_update_response = self.helper.send_state(
+            room_id1,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        room_name_update_event_pos = self.get_success(
+            self.store.get_position_for_event(room_name_update_response["event_id"])
+        )
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id1)
+        )
+
+        # Assert joined room status
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # Latest event in the room
+                event_stream_ordering=room_name_update_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # Assert membership snapshots
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user was invited
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invited_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=user1_invited_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                # Room name was updated after the user was invited so we should still
+                # see it unset here
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id1,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            user2_snapshot,
+        )
+
+        # User1 joins the room
+        # ======================================================
+        user1_joined_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        user1_joined_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_joined_response["event_id"])
+        )
+
+        # Assert joined room status
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # Latest event in the room
+                event_stream_ordering=user1_joined_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # Assert membership snapshots
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=user1_joined_response["event_id"],
+                membership=Membership.JOIN,
+                event_stream_ordering=user1_joined_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                # We see the updated state because the user joined after the room name
+                # change
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            user2_snapshot,
+        )
+
+        # User1 is banned from the room
+        # ======================================================
+        user1_ban_response = self.helper.ban(
+            room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_ban_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_ban_response["event_id"])
+        )
+
+        # Assert joined room status
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id1},
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id1],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id1,
+                # Latest event in the room
+                event_stream_ordering=user1_ban_event_pos.stream,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # Assert membership snapshots
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user was banned
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_ban_response["event_id"],
+                membership=Membership.BAN,
+                event_stream_ordering=user1_ban_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                # We see the updated state because the user was banned after the room
+                # name change
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_non_join_server_left_room(self) -> None:
+        """
+        Test that when every local user leaves the room, their leave memberships still
+        show up in `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 joins the room
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # User2 leaves the room
+        user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok)
+        user2_leave_event_pos = self.get_success(
+            self.store.get_position_for_event(user2_leave_response["event_id"])
+        )
+
+        # User1 leaves the room
+        user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        user1_leave_event_pos = self.get_success(
+            self.store.get_position_for_event(user1_leave_response["event_id"])
+        )
+
+        # No one is joined to the room anymore so we shouldn't have an entry in the
+        # `sliding_sync_joined_rooms` table.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # We should still see rows for the leave events (non-joins)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id1, user1_id),
+                (room_id1, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=user1_leave_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=user1_leave_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id1,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=user2_leave_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=user2_leave_event_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    @parameterized.expand(
+        [
+            # No stripped state provided
+            ("none", None),
+            # Empty stripped state provided
+            ("empty", []),
+        ]
+    )
+    def test_non_join_remote_invite_no_stripped_state(
+        self, _description: str, stripped_state: Optional[List[StrippedStateEvent]]
+    ) -> None:
+        """
+        Test that a remote invite with no stripped state provided shows up in
+        `sliding_sync_membership_snapshots` with `has_known_state=False`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room without any `unsigned.invite_room_state`
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(user1_id, stripped_state)
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=remote_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+                # No stripped state provided
+                has_known_state=False,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_remote_invite_unencrypted_room(self) -> None:
+        """
+        Test that a remote invite with stripped state (unencrypted room) shows up in
+        `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating the room name (and no encryption).
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper room",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=remote_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_remote_invite_encrypted_room(self) -> None:
+        """
+        Test remote invite with stripped state (encrypted room) shows up in
+        `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating that the room is encrypted.
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                    # This is not one of the stripped state events according to the spec
+                    # but we still handle it.
+                    StrippedStateEvent(
+                        type=EventTypes.Tombstone,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room",
+                        },
+                    ),
+                    # Also test a random event that we don't care about
+                    StrippedStateEvent(
+                        type="org.matrix.foo_state",
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            "foo": "qux",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=remote_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+
+    def test_non_join_remote_invite_space_room(self) -> None:
+        """
+        Test remote invite with stripped state (encrypted space room with name) shows up in
+        `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating that the room is an encrypted space with a name.
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                            # Specify that it is a space room
+                            EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper space",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=remote_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_reject_remote_invite(self) -> None:
+        """
+        Test that a rejected remote invite (the user decided to leave the room) inherits
+        metadata from the remote invite's stripped state and shows up in
+        `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating that the room is encrypted.
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # User1 decides to leave the room (reject the invite)
+        user1_leave_response = self.helper.leave(
+            remote_invite_room_id, user1_id, tok=user1_tok
+        )
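+        # Look up the stream position of the leave event so we can assert the
+        # snapshot's `event_stream_ordering` below.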
+        user1_leave_pos = self.get_success(
+            self.store.get_position_for_event(user1_leave_response["event_id"])
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=user1_leave_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=user1_leave_pos.stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_retracted_remote_invite(self) -> None:
+        """
+        Test that a retracted remote invite (the remote inviter kicks the person who was
+        invited) inherits metadata from the remote invite's stripped state and shows up
+        in `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote invite room with some `unsigned.invite_room_state`
+        # indicating that the room is encrypted.
+        remote_invite_room_id, remote_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # `@inviter:remote_server` decides to retract the invite (kicks the user).
+        # (Note: A kick is just a leave event with a different sender)
+        remote_invite_retraction_event = self._retract_remote_invite_for_user(
+            user_id=user1_id,
+            remote_room_id=remote_invite_room_id,
+        )
+
+        # No one local is joined to the remote room
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (remote_invite_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (remote_invite_room_id, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=remote_invite_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=remote_invite_retraction_event.event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=remote_invite_retraction_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_non_join_state_reset(self) -> None:
+        """
+        Test a state reset that removes someone from the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Make sure we see the new room name
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
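+        # Fetch the current state so we can reference specific events (and their
+        # stream orderings) in the assertions below.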
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name="my super duper room",
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined (no room
+        # name when the room creator joined)
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        message_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[state_map[(EventTypes.Name, "")].event_id],
+                auth_event_ids=[
+                    state_map[(EventTypes.Create, "")].event_id,
+                    state_map[(EventTypes.Member, user1_id)].event_id,
+                ],
+                type=EventTypes.Message,
+                content={"body": "foo", "msgtype": "m.text"},
+                sender=user1_id,
+                room_id=room_id,
+                room_version=RoomVersions.V10.identifier,
+            )
+        )
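+        # Persist the event directly via the events store, along with a raw state
+        # delta that mimics the state reset.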
+        event_chunk = [message_tuple]
+        self.get_success(
+            self.persist_events_store._persist_events_and_state_updates(
+                room_id,
+                event_chunk,
+                state_delta_for_room=DeltaState(
+                    # This is the state reset part. We're removing user1's membership
+                    # from the current state.
+                    to_delete=[(EventTypes.Member, user1_id)],
+                    to_insert={},
+                ),
+                new_forward_extremities={message_tuple[0].event_id},
+                use_negative_stream_ordering=False,
+                inhibit_local_membership_updates=False,
+                new_event_links={},
+            )
+        )
+
+        # State reset on membership doesn't affect the `sliding_sync_joined_rooms` table
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=message_tuple[
+                    0
+                ].internal_metadata.stream_ordering,
+                bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # State reset on membership should remove the user's snapshot
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # We shouldn't see user1 in the snapshots table anymore
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Snapshot for user2 hasn't changed
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_membership_snapshot_forget(self) -> None:
+        """
+        Test that forgetting a room updates `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User1 leaves the room (we have to leave in order to forget the room)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+
+        # Check on the `sliding_sync_membership_snapshots` table (nothing should be
+        # forgotten yet)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user left
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.LEAVE,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+            # Room is not forgotten
+            forgotten=False,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+        # Forget the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Check on the `sliding_sync_membership_snapshots` table
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Room is now forgotten for user1
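+        # (`attr.evolve` copies the snapshot, changing only the `forgotten` flag)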
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            attr.evolve(user1_snapshot, forgotten=True),
+        )
+        # Nothing changed for user2
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_membership_snapshot_missing_forget(
+        self,
+    ) -> None:
+        """
+        Test forgetting a room with no existing row in `sliding_sync_membership_snapshots`.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User1 leaves the room (we have to leave in order to forget the room)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id,),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Forget the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # It doesn't explode
+
+        # We still shouldn't find anything in the table because nothing has re-created them
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+
+class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+    """
+    Test the background updates that populate the `sliding_sync_joined_rooms` and
+    `sliding_sync_membership_snapshots` tables.
+    """
+
+    def test_joined_background_update_missing(self) -> None:
+        """
+        Test that the background update for `sliding_sync_joined_rooms` populates missing rows
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user1_tok,
+        )
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_joined_rooms",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_joined_rooms.test_joined_background_update_missing",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background updates.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
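+        # The joined rooms update depends on the prefill update having run first,
+        # hence the `depends_on` below.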
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+                    "progress_json": "{}",
+                    "depends_on": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                },
+            )
+        )
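+        # Tell the background updater there is outstanding work and wait for it to finish.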
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id_no_info, room_id_with_info, space_room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_no_info)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id_no_info],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id_no_info,
+                # History visibility just happens to be the last event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomHistoryVisibility, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_with_info)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id_with_info],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id_with_info,
+                # Latest event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomEncryption, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[space_room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=space_room_id,
+                # Latest event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Name, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_joined(self) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for join memberships.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user1_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user1_tok,
+        )
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_joined",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_no_info)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_with_info)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_local_invite(self) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for invite memberships.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user2_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user2_tok,
+        )
+
+        # Invite user1 to the rooms
+        user1_invite_room_id_no_info_response = self.helper.invite(
+            room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_invite_room_id_with_info_response = self.helper.invite(
+            room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+        user1_invite_space_room_id_response = self.helper.invite(
+            space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+        )
+
+        # Have user2 leave the rooms to make sure that our background update is not just
+        # reading from `current_state_events`. For invite/knock memberships, we should
+        # be reading from the stripped state on the invite/knock event itself.
+        self.helper.leave(room_id_no_info, user2_id, tok=user2_tok)
+        self.helper.leave(room_id_with_info, user2_id, tok=user2_tok)
+        self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+        # Check to make sure we actually don't have any `current_state_events` for the rooms
+        current_state_check_rows = self.get_success(
+            self.store.db_pool.simple_select_many_batch(
+                table="current_state_events",
+                column="room_id",
+                iterable=[room_id_no_info, room_id_with_info, space_room_id],
+                retcols=("event_id",),
+                keyvalues={},
+                desc="check current_state_events in test",
+            )
+        )
+        self.assertEqual(len(current_state_check_rows), 0)
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_local_invite",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+                # The leave memberships for user2
+                (room_id_no_info, user2_id),
+                (room_id_with_info, user2_id),
+                (space_room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_room_id_no_info_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_room_id_no_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_room_id_with_info_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_room_id_with_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                # The tombstone ("another_room") isn't shown here because it's not one
+                # of the stripped events that we hand out as part of the invite event.
+                # Even though we handle this scenario for invites from other remote
+                # homeservers, Synapse does not include the tombstone in the invite event.
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_space_room_id_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_space_room_id_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_remote_invite(
+        self,
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for remote invites (out-of-band memberships).
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_unknown_state, room_id_unknown_state_invite_event = (
+            self._create_remote_invite_room_for_user(user1_id, None)
+        )
+
+        room_id_no_info, room_id_no_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                ],
+            )
+        )
+
+        room_id_with_info, room_id_with_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper room",
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        space_room_id, space_room_id_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                            EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper space",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(
+                    room_id_unknown_state,
+                    room_id_no_info,
+                    room_id_with_info,
+                    space_room_id,
+                ),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_unknown_state, user1_id),
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_unknown_state, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_unknown_state,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_unknown_state_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_unknown_state_invite_event.internal_metadata.stream_ordering,
+                has_known_state=False,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_no_info_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_no_info_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_with_info_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_with_info_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=space_room_id_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=space_room_id_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_remote_invite_rejections_and_retractions(
+        self,
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for remote invite rejections/retractions (out-of-band memberships).
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_unknown_state, room_id_unknown_state_invite_event = (
+            self._create_remote_invite_room_for_user(user1_id, None)
+        )
+
+        room_id_no_info, room_id_no_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                ],
+            )
+        )
+
+        room_id_with_info, room_id_with_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper room",
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        space_room_id, space_room_id_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                            EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper space",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # Reject two of the remote invites (user1 leaves) and retract the other two
+        # (the remote inviter revokes the invites).
+        room_id_unknown_state_leave_event_response = self.helper.leave(
+            room_id_unknown_state, user1_id, tok=user1_tok
+        )
+        room_id_no_info_leave_event = self._retract_remote_invite_for_user(
+            user_id=user1_id,
+            remote_room_id=room_id_no_info,
+        )
+        room_id_with_info_leave_event_response = self.helper.leave(
+            room_id_with_info, user1_id, tok=user1_tok
+        )
+        space_room_id_leave_event = self._retract_remote_invite_for_user(
+            user_id=user1_id,
+            remote_room_id=space_room_id,
+        )
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(
+                    room_id_unknown_state,
+                    room_id_no_info,
+                    room_id_with_info,
+                    space_room_id,
+                ),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite_rejections_and_retractions",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_unknown_state, user1_id),
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_unknown_state, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_unknown_state,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=room_id_unknown_state_leave_event_response[
+                    "event_id"
+                ],
+                membership=Membership.LEAVE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        room_id_unknown_state_leave_event_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=False,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_no_info_leave_event.event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=room_id_no_info_leave_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=room_id_with_info_leave_event_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        room_id_with_info_leave_event_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=space_room_id_leave_event.event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=space_room_id_leave_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    @parameterized.expand(
+        [
+            # We'll do a kick for this
+            (Membership.LEAVE,),
+            (Membership.BAN,),
+        ]
+    )
+    def test_membership_snapshots_background_update_historical_state(
+        self, test_membership: str
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for kick/ban memberships using historical state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user2_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user2_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user2_tok,
+        )
+
+        # Join the rooms in preparation for our test_membership
+        self.helper.join(room_id_no_info, user1_id, tok=user1_tok)
+        self.helper.join(room_id_with_info, user1_id, tok=user1_tok)
+        self.helper.join(space_room_id, user1_id, tok=user1_tok)
+
+        if test_membership == Membership.LEAVE:
+            # Kick user1 from the rooms
+            user1_membership_room_id_no_info_response = self.helper.change_membership(
+                room=room_id_no_info,
+                src=user2_id,
+                targ=user1_id,
+                tok=user2_tok,
+                membership=Membership.LEAVE,
+                extra_data={
+                    "reason": "Bad manners",
+                },
+            )
+            user1_membership_room_id_with_info_response = self.helper.change_membership(
+                room=room_id_with_info,
+                src=user2_id,
+                targ=user1_id,
+                tok=user2_tok,
+                membership=Membership.LEAVE,
+                extra_data={
+                    "reason": "Bad manners",
+                },
+            )
+            user1_membership_space_room_id_response = self.helper.change_membership(
+                room=space_room_id,
+                src=user2_id,
+                targ=user1_id,
+                tok=user2_tok,
+                membership=Membership.LEAVE,
+                extra_data={
+                    "reason": "Bad manners",
+                },
+            )
+        elif test_membership == Membership.BAN:
+            # Ban user1 from the rooms
+            user1_membership_room_id_no_info_response = self.helper.ban(
+                room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok
+            )
+            user1_membership_room_id_with_info_response = self.helper.ban(
+                room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok
+            )
+            user1_membership_space_room_id_response = self.helper.ban(
+                space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+            )
+        else:
+            raise AssertionError("Unknown test_membership")
+
+        # Have user2 leave the rooms to make sure that our background update is not just
+        # reading from `current_state_events`. For leave memberships, we should be
+        # reading from the historical state.
+        self.helper.leave(room_id_no_info, user2_id, tok=user2_tok)
+        self.helper.leave(room_id_with_info, user2_id, tok=user2_tok)
+        self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+        # Check to make sure we actually don't have any `current_state_events` for the rooms
+        current_state_check_rows = self.get_success(
+            self.store.db_pool.simple_select_many_batch(
+                table="current_state_events",
+                column="room_id",
+                iterable=[room_id_no_info, room_id_with_info, space_room_id],
+                retcols=("event_id",),
+                keyvalues={},
+                desc="check current_state_events in test",
+            )
+        )
+        self.assertEqual(len(current_state_check_rows), 0)
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_historical_state",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The memberships for user1
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+                # The leave memberships for user2
+                (room_id_no_info, user2_id),
+                (room_id_with_info, user2_id),
+                (space_room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                # Because user2 kicked/banned user1 from the room
+                sender=user2_id,
+                membership_event_id=user1_membership_room_id_no_info_response[
+                    "event_id"
+                ],
+                membership=test_membership,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_membership_room_id_no_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                # Because user2 kicked/banned user1 from the room
+                sender=user2_id,
+                membership_event_id=user1_membership_room_id_with_info_response[
+                    "event_id"
+                ],
+                membership=test_membership,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_membership_room_id_with_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                # Because user2 kicked/banned user1 from the room
+                sender=user2_id,
+                membership_event_id=user1_membership_space_room_id_response["event_id"],
+                membership=test_membership,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_membership_space_room_id_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_forgotten_missing(self) -> None:
+        """
+        Test that the background update inserts a new row into
+        `sliding_sync_membership_snapshots` for a forgotten room when one doesn't exist
+        in the table yet, and that the `forgotten` flag is preserved.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User1 leaves the room (we have to leave in order to forget the room)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+
+        # Forget the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
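+
+        # Forgetting before we wipe the snapshot table matters here: the background
+        # update below is expected to re-create user1's row with `forgotten=True` (see
+        # the assertion at the end of this test), presumably sourcing the flag from the
+        # membership tables rather than from the snapshot row we are about to delete.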
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id,),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+                # Room is forgotten
+                forgotten=True,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+
+class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+    """
+    Test the background updates for catch-up after Synapse downgrade to populate the
+    `sliding_sync_joined_rooms` and `sliding_sync_membership_snapshots` tables.
+
+    This is to test the "catch-up" version of the background update vs the "normal"
+    background update to populate the tables with all of the historical data. Both
+    versions share the same background update but just serve different purposes. We
+    check if the "catch-up" version needs to run on start-up based on whether there have
+    been any changes to rooms that aren't reflected in the sliding sync tables.
+
+    FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    foreground update for
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    https://github.com/element-hq/synapse/issues/17623)
+    """
+
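+    # A rough sketch of the shape shared by the tests below (not production code):
+    #   1. create the rooms/memberships and let the normal background updates finish,
+    #   2. manually delete or roll back rows to simulate writes that happened while
+    #      Synapse was downgraded,
+    #   3. run the `_resolve_stale_data_in_sliding_sync_*_table` function under test,
+    #      which should clear out stale rows and register the catch-up background
+    #      update,
+    #   4. wait for background updates and check that the tables are repopulated.
+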
+    def test_joined_background_update_catch_up_new_room(self) -> None:
+        """
+        Test that rooms created while Synapse is downgraded (making
+        `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate a new room being created while
+        # Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                desc="simulate new room while Synapse was downgraded",
+            )
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+                _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+            )
+        )
+
+        # We shouldn't see any new data yet
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+
+    def test_joined_background_update_catch_up_room_state_change(self) -> None:
+        """
+        Test that events sent while Synapse is downgraded (making
+        `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Get a snapshot of the `sliding_sync_joined_rooms` table before we add some state
+        sliding_sync_joined_rooms_results_before_state = (
+            self._get_sliding_sync_joined_rooms()
+        )
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results_before_state.keys()),
+            {room_id},
+            exact=True,
+        )
+
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room name
+        # never made it into the table. This is to simulate the room name event
+        # being sent while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_update(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                updatevalues={
+                    # Clear the room name
+                    "room_name": None,
+                    # Reset the `event_stream_ordering` back to the value before the room name
+                    "event_stream_ordering": sliding_sync_joined_rooms_results_before_state[
+                        room_id
+                    ].event_stream_ordering,
+                },
+                desc="simulate new events while Synapse was downgraded",
+            )
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+                _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+            )
+        )
+
+        # Ensure that the stale data is deleted from the table
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+
+    def test_joined_background_update_catch_up_no_rooms(self) -> None:
+        """
+        Test that if you start your homeserver with no rooms on a Synapse version that
+        supports the sliding sync tables and the historical background update completes
+        (because no rooms to process), then Synapse is downgraded and new rooms are
+        created/joined; when Synapse is upgraded, the rooms will be processed when the
+        catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate room being
+        # created while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate the room being created while Synapse
+        # was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_joined_rooms",
+                column="room_id",
+                iterable=(room_id,),
+                keyvalues={},
+                desc="simulate room being created while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+                _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+            )
+        )
+
+        # We still shouldn't find any data yet
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_new_membership(
+        self,
+    ) -> None:
+        """
+        Test that a completely new membership created while Synapse is downgraded (making
+        `sliding_sync_membership_snapshots` stale) will be caught when Synapse is
+        upgraded and the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Both users are joined to the room
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the user2
+        # membership never made it into the table. This is to simulate a completely
+        # new membership arriving while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                desc="simulate new membership while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find the user2 membership in the table because we just deleted it
+        # in preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # We still shouldn't find the user2 membership yet (the catch-up background
+        # update hasn't run)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_membership_change(
+        self,
+    ) -> None:
+        """
+        Test that membership changes while Synapse is downgraded (making
+        `sliding_sync_membership_snapshots` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Both users are joined to the room
+        sliding_sync_membership_snapshots_results_before_membership_changes = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(
+                sliding_sync_membership_snapshots_results_before_membership_changes.keys()
+            ),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+        # User2 leaves the room
+        self.helper.leave(room_id, user2_id, tok=user2_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Rollback the `sliding_sync_membership_snapshots` table as if user2's leave
+        # never made it into the table. This is to simulate a membership
+        # change while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_update(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                updatevalues={
+                    # Reset everything back to the value before user2 left the room
+                    "membership": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership,
+                    "membership_event_id": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership_event_id,
+                    "event_stream_ordering": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].event_stream_ordering,
+                },
+                desc="simulate membership change while Synapse was downgraded",
+            )
+        )
+
+        # We should see user2 still joined to the room because we made that change in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user1_id)
+            ],
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user2_id)
+            ],
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # Ensure that the stale data is deleted from the table
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_no_membership(
+        self,
+    ) -> None:
+        """
+        Test that if you start your homeserver with no rooms on a Synapse version that
+        supports the sliding sync tables and the historical background update completes
+        (because no rooms/membership to process), then Synapse is downgraded and new
+        rooms are created/joined; when Synapse is upgraded, the rooms will be processed
+        when the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the memberships
+        # never made it into the table. This is to simulate the room being
+        # created/joined while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id,),
+                keyvalues={},
+                desc="simulate room being created while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # We still shouldn't find any data yet
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index aad46b1b44..ed5f286243 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -27,7 +27,13 @@ from immutabledict import immutabledict
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes
+from synapse.api.constants import (
+    Direction,
+    EventTypes,
+    JoinRules,
+    Membership,
+    RelationTypes,
+)
 from synapse.api.filtering import Filter
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import FrozenEventV3
@@ -147,8 +153,8 @@ class PaginationTestCase(HomeserverTestCase):
     def _filter_messages(self, filter: JsonDict) -> List[str]:
         """Make a request to /messages with a filter, returns the chunk of events."""
 
-        events, next_key = self.get_success(
-            self.hs.get_datastores().main.paginate_room_events(
+        events, next_key, _ = self.get_success(
+            self.hs.get_datastores().main.paginate_room_events_by_topological_ordering(
                 room_id=self.room_id,
                 from_key=self.from_token.room_key,
                 to_key=None,
@@ -556,6 +562,47 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
             ),
         )
 
+    def test_restrict_event_types(self) -> None:
+        """
+        Test that we only consider given `event_types` when finding the last event
+        before a token.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
+        event_response = self.helper.send_event(
+            room_id1,
+            type="org.matrix.special_message",
+            content={"body": "before1, target!"},
+            tok=user1_tok,
+        )
+        self.helper.send(room_id1, "before2", tok=user1_tok)
+
+        after_room_token = self.event_sources.get_current_token()
+
+        # Send some events after the token
+        self.helper.send_event(
+            room_id1,
+            type="org.matrix.special_message",
+            content={"body": "after1"},
+            tok=user1_tok,
+        )
+        self.helper.send(room_id1, "after2", tok=user1_tok)
+
+        last_event_result = self.get_success(
+            self.store.get_last_event_pos_in_room_before_stream_ordering(
+                room_id=room_id1,
+                end_token=after_room_token.room_key,
+                event_types=["org.matrix.special_message"],
+            )
+        )
+        assert last_event_result is not None
+        last_event_id, _ = last_event_result
+
+        # Make sure it's the last event of the restricted type before the token
+        self.assertEqual(last_event_id, event_response["event_id"])
+
 
 class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
     """
@@ -1113,7 +1160,7 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
                     room_id=room_id1,
                     event_id=None,
                     event_pos=dummy_state_pos,
-                    membership="leave",
+                    membership=Membership.LEAVE,
                     sender=None,  # user1_id,
                     prev_event_id=join_response1["event_id"],
                     prev_event_pos=join_pos1,
@@ -1123,6 +1170,81 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
             ],
         )
 
+    def test_state_reset2(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, is_public=True, tok=user2_tok)
+
+        event_response = self.helper.send(room_id1, "test", tok=user2_tok)
+        event_id = event_response["event_id"]
+
+        user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        user1_join_pos = self.get_success(
+            self.store.get_position_for_event(user1_join_response["event_id"])
+        )
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Trigger a state reset
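+        # We do this by persisting a new join-rules event whose prev_event points at
+        # an event from *before* user1 joined, which causes the room's current state
+        # to be recalculated without user1's membership (and without a leave event).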
+        join_rule_event, join_rule_context = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[event_id],
+                type=EventTypes.JoinRules,
+                state_key="",
+                content={"join_rule": JoinRules.INVITE},
+                sender=user2_id,
+                room_id=room_id1,
+                room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+            )
+        )
+        _, join_rule_event_pos, _ = self.get_success(
+            self.persistence.persist_event(join_rule_event, join_rule_context)
+        )
+
+        # FIXME: We're manually busting the cache since
+        # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, join_rule_event_pos.stream
+        )
+
+        after_reset_token = self.event_sources.get_current_token()
+
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_reset_token.room_key,
+                to_key=after_reset_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=None,
+                    # The position where the state reset happened
+                    event_pos=join_rule_event_pos,
+                    membership=Membership.LEAVE,
+                    sender=None,
+                    prev_event_id=user1_join_response["event_id"],
+                    prev_event_pos=user1_join_pos,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
     def test_excluded_room_ids(self) -> None:
         """
         Test that the `excluded_room_ids` option excludes changes from the specified rooms.
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 6d1ae4c8d7..f12402f5f2 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -292,12 +292,14 @@ class EventAuthTestCase(unittest.TestCase):
         ]
 
         # pleb should not be able to send state
-        self.assertRaises(
-            AuthError,
-            event_auth.check_state_dependent_auth_rules,
-            _random_state_event(RoomVersions.V1, pleb),
-            auth_events,
-        ),
+        (
+            self.assertRaises(
+                AuthError,
+                event_auth.check_state_dependent_auth_rules,
+                _random_state_event(RoomVersions.V1, pleb),
+                auth_events,
+            ),
+        )
 
         # king should be able to send state
         event_auth.check_state_dependent_auth_rules(
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 4e9adc0625..94b0fa9856 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -101,7 +101,9 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
         ) -> List[EventBase]:
             return list(pdus)
 
-        self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch  # type: ignore[assignment]
+        self.client._check_sigs_and_hash_for_pulled_events_and_fetch = (  # type: ignore[method-assign]
+            _check_sigs_and_hash_for_pulled_events_and_fetch  # type: ignore[assignment]
+        )
 
         # Send the join, it should return None (which is not an error)
         self.assertEqual(
diff --git a/tests/test_server.py b/tests/test_server.py
index 0910ea5f28..9ff2589497 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -392,8 +392,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
         )
 
         self.assertEqual(channel.code, 301)
-        headers = channel.result["headers"]
-        location_headers = [v for k, v in headers if k == b"Location"]
+        location_headers = channel.headers.getRawHeaders(b"Location", [])
         self.assertEqual(location_headers, [b"/look/an/eagle"])
 
     def test_redirect_exception_with_cookie(self) -> None:
@@ -415,10 +414,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
         )
 
         self.assertEqual(channel.code, 304)
-        headers = channel.result["headers"]
-        location_headers = [v for k, v in headers if k == b"Location"]
+        headers = channel.headers
+        location_headers = headers.getRawHeaders(b"Location", [])
         self.assertEqual(location_headers, [b"/no/over/there"])
-        cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
+        cookies_headers = headers.getRawHeaders(b"Set-Cookie", [])
         self.assertEqual(cookies_headers, [b"session=yespls"])
 
     def test_head_request(self) -> None:
diff --git a/tests/test_types.py b/tests/test_types.py
index 944aa784fc..0c08bc8ecc 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -19,9 +19,18 @@
 #
 #
 
+from typing import Type
+from unittest import skipUnless
+
+from immutabledict import immutabledict
+from parameterized import parameterized_class
+
 from synapse.api.errors import SynapseError
 from synapse.types import (
+    AbstractMultiWriterStreamToken,
+    MultiWriterStreamToken,
     RoomAlias,
+    RoomStreamToken,
     UserID,
     get_domain_from_id,
     get_localpart_from_id,
@@ -29,6 +38,7 @@ from synapse.types import (
 )
 
 from tests import unittest
+from tests.utils import USE_POSTGRES_FOR_TESTS
 
 
 class IsMineIDTests(unittest.HomeserverTestCase):
@@ -127,3 +137,66 @@ class MapUsernameTestCase(unittest.TestCase):
         # this should work with either a unicode or a bytes
         self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast")
         self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast")
+
+
+@parameterized_class(
+    ("token_type",),
+    [
+        (MultiWriterStreamToken,),
+        (RoomStreamToken,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}",
+)
+class MultiWriterTokenTestCase(unittest.HomeserverTestCase):
+    """Tests for the different types of multi writer tokens."""
+
+    token_type: Type[AbstractMultiWriterStreamToken]
+
+    def test_basic_token(self) -> None:
+        """Test that a simple stream token can be serialized and unserialized"""
+        store = self.hs.get_datastores().main
+
+        token = self.token_type(stream=5)
+
+        string_token = self.get_success(token.to_string(store))
+
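+        # Room stream tokens serialize with an "s" prefix ("s5"), whereas other
+        # multi-writer stream tokens serialize to just the stream position ("5").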
+        if isinstance(token, RoomStreamToken):
+            self.assertEqual(string_token, "s5")
+        else:
+            self.assertEqual(string_token, "5")
+
+        parsed_token = self.get_success(self.token_type.parse(store, string_token))
+        self.assertEqual(parsed_token, token)
+
+    @skipUnless(USE_POSTGRES_FOR_TESTS, "Requires Postgres")
+    def test_instance_map(self) -> None:
+        """Test for stream token with instance map"""
+        store = self.hs.get_datastores().main
+
+        token = self.token_type(stream=5, instance_map=immutabledict({"foo": 6}))
+
+        string_token = self.get_success(token.to_string(store))
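+        # "m5~1.6": minimum stream position 5, with instance "foo" (mapped to the
+        # numeric id 1 by the store) at position 6.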
+        self.assertEqual(string_token, "m5~1.6")
+
+        parsed_token = self.get_success(self.token_type.parse(store, string_token))
+        self.assertEqual(parsed_token, token)
+
+    def test_instance_map_assertion(self) -> None:
+        """Test that we assert values in the instance map are greater than the
+        min stream position"""
+
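+        # Both a position below the minimum stream position and one equal to it
+        # should be rejected.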
+        with self.assertRaises(ValueError):
+            self.token_type(stream=5, instance_map=immutabledict({"foo": 4}))
+
+        with self.assertRaises(ValueError):
+            self.token_type(stream=5, instance_map=immutabledict({"foo": 5}))
+
+    def test_parse_bad_token(self) -> None:
+        """Test that we can parse tokens produced by a bug in Synapse of the
+        form `m5~`"""
+        store = self.hs.get_datastores().main
+
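+        # "m5~" is a multi-writer token with an (erroneously) empty instance map;
+        # it should parse as a plain token at stream position 5.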
+        parsed_token = self.get_success(self.token_type.parse(store, "m5~"))
+        self.assertEqual(parsed_token, self.token_type(stream=5))
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index 4ab42a02b9..4d7adf7204 100644
--- a/tests/test_utils/__init__.py
+++ b/tests/test_utils/__init__.py
@@ -22,6 +22,7 @@
 """
 Utilities for running the unit tests
 """
+
 import json
 import sys
 import warnings
diff --git a/tests/unittest.py b/tests/unittest.py
index a7c20556a0..614e805abd 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -28,6 +28,7 @@ import logging
 import secrets
 import time
 from typing import (
+    AbstractSet,
     Any,
     Awaitable,
     Callable,
@@ -269,6 +270,56 @@ class TestCase(unittest.TestCase):
                 required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
             )
 
+    def assertIncludes(
+        self,
+        actual_items: AbstractSet[TV],
+        expected_items: AbstractSet[TV],
+        exact: bool = False,
+        message: Optional[str] = None,
+    ) -> None:
+        """
+        Assert that all of the `expected_items` are included in the `actual_items`.
+
+        This assert could also be called `assertContains`, `assertItemsInSet`
+
+        Args:
+            actual_items: The container
+            expected_items: The items to check for in the container
+            exact: Whether the actual items should be exactly equal to the expected
+                items (no extras).
+            message: Optional message to include in the failure message.
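+
+        Example:
+            self.assertIncludes({"a", "b", "c"}, {"a", "b"})              # passes
+            self.assertIncludes({"a", "b", "c"}, {"a", "b"}, exact=True)  # fails
+            self.assertIncludes({"a", "b"}, {"a", "b"}, exact=True)       # passes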
+        """
+        # Check that each set has the same items
+        if exact and actual_items == expected_items:
+            return
+        # Check for a superset
+        elif not exact and actual_items >= expected_items:
+            return
+
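+        # Otherwise build a diff-style failure message showing which expected items
+        # are missing ("?") and which actual items were expected ("+").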
+        expected_lines: List[str] = []
+        for expected_item in expected_items:
+            is_expected_in_actual = expected_item in actual_items
+            expected_lines.append(
+                "{}  {}".format(" " if is_expected_in_actual else "?", expected_item)
+            )
+
+        actual_lines: List[str] = []
+        for actual_item in actual_items:
+            is_actual_in_expected = actual_item in expected_items
+            actual_lines.append(
+                "{}  {}".format("+" if is_actual_in_expected else " ", actual_item)
+            )
+
+        newline = "\n"
+        expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
+        actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
+        first_message = (
+            "Items must match exactly" if exact else "Some expected items are missing."
+        )
+        diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
+
+        self.fail(f"{diff_message}\n{message}")
+
 
 def DEBUG(target: TV) -> TV:
     """A decorator to set the .loglevel attribute to logging.DEBUG.
@@ -406,7 +457,9 @@ class HomeserverTestCase(TestCase):
                 # Type ignore: mypy doesn't like us assigning to methods.
                 self.hs.get_auth().get_user_by_req = get_requester  # type: ignore[method-assign]
                 self.hs.get_auth().get_user_by_access_token = get_requester  # type: ignore[method-assign]
-                self.hs.get_auth().get_access_token_from_request = Mock(return_value=token)  # type: ignore[method-assign]
+                self.hs.get_auth().get_access_token_from_request = Mock(  # type: ignore[method-assign]
+                    return_value=token
+                )
 
         if self.needs_threadpool:
             self.reactor.threadpool = ThreadPool()  # type: ignore[assignment]
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index fb67146c69..13a4e6ddaa 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -21,6 +21,7 @@
 
 from contextlib import contextmanager
 from os import PathLike
+from pathlib import Path
 from typing import Generator, Optional, Union
 from unittest.mock import patch
 
@@ -41,7 +42,7 @@ class DummyDistribution(metadata.Distribution):
     def version(self) -> str:
         return self._version
 
-    def locate_file(self, path: Union[str, PathLike]) -> PathLike:
+    def locate_file(self, path: Union[str, PathLike]) -> Path:
         raise NotImplementedError()
 
     def read_text(self, filename: str) -> None:
diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index 5d38718a50..c41f5706af 100644
--- a/tests/util/test_stream_change_cache.py
+++ b/tests/util/test_stream_change_cache.py
@@ -53,8 +53,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         # return True, whether it's a known entity or not.
         self.assertTrue(cache.has_entity_changed("user@foo.com", 0))
         self.assertTrue(cache.has_entity_changed("not@here.website", 0))
-        self.assertTrue(cache.has_entity_changed("user@foo.com", 3))
-        self.assertTrue(cache.has_entity_changed("not@here.website", 3))
+        self.assertTrue(cache.has_entity_changed("user@foo.com", 2))
+        self.assertTrue(cache.has_entity_changed("not@here.website", 2))
 
     def test_entity_has_changed_pops_off_start(self) -> None:
         """
@@ -76,9 +76,11 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertTrue("user@foo.com" not in cache._entity_to_key)
 
         self.assertEqual(
-            cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
+            cache.get_all_entities_changed(2).entities,
+            ["bar@baz.net", "user@elsewhere.org"],
         )
-        self.assertFalse(cache.get_all_entities_changed(2).hit)
+        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertTrue(cache.get_all_entities_changed(2).hit)
 
         # If we update an existing entity, it keeps the two existing entities
         cache.entity_has_changed("bar@baz.net", 5)
@@ -89,7 +91,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
             cache.get_all_entities_changed(3).entities,
             ["user@elsewhere.org", "bar@baz.net"],
         )
-        self.assertFalse(cache.get_all_entities_changed(2).hit)
+        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertTrue(cache.get_all_entities_changed(2).hit)
 
     def test_get_all_entities_changed(self) -> None:
         """
@@ -114,7 +117,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertEqual(
             cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
         )
-        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertFalse(cache.get_all_entities_changed(0).hit)
+        self.assertTrue(cache.get_all_entities_changed(1).hit)
 
         # ... later, things get more updates
         cache.entity_has_changed("user@foo.com", 5)
@@ -149,7 +153,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         # With no entities, it returns True for the past, present, and False for
         # the future.
         self.assertTrue(cache.has_any_entity_changed(0))
-        self.assertTrue(cache.has_any_entity_changed(1))
+        self.assertFalse(cache.has_any_entity_changed(1))
         self.assertFalse(cache.has_any_entity_changed(2))
 
         # We add an entity
@@ -249,5 +253,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3)
         self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4)
 
-        # Unknown entities will return the stream start position.
-        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1)
+        # Unknown entities will return None
+        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)