mirror of https://github.com/element-hq/synapse.git synced 2025-03-31 03:45:13 +00:00

Merge branch 'develop' into register-email-3pid-race

Commit 75580256bf by mcalinghee, 2025-02-04 15:50:27 +01:00 (committed by GitHub).
98 changed files with 8766 additions and 4657 deletions


@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@80620a5d27ce0ae443b965134db88467fc607b43 # v7
+        uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}

CHANGES.md: 3813 changed lines (diff suppressed because it is too large).

Cargo.lock (generated): 43 changed lines

@ -124,10 +124,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"wasi",
"wasm-bindgen",
]
[[package]]
@ -218,9 +216,9 @@ checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
[[package]]
name = "log"
version = "0.4.22"
version = "0.4.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
[[package]]
name = "memchr"
@ -272,9 +270,9 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.23.3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e484fd2c8b4cb67ab05a318f1fd6fa8f199fcc30819f08f07d200809dba26c15"
checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc"
dependencies = [
"anyhow",
"cfg-if",
@ -291,9 +289,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.23.3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc0e0469a84f208e20044b98965e1561028180219e35352a2afaf2b942beff3b"
checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7"
dependencies = [
"once_cell",
"target-lexicon",
@ -301,9 +299,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.23.3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb1547a7f9966f6f1a0f0227564a9945fe36b90da5a93b3933fc3dc03fae372d"
checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d"
dependencies = [
"libc",
"pyo3-build-config",
@ -322,9 +320,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.23.3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdb6da8ec6fa5cedd1626c886fc8749bdcbb09424a86461eb8cdf096b7c33257"
checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@ -334,9 +332,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
version = "0.23.3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38a385202ff5a92791168b1136afae5059d3ac118457bb7bc304c197c2d33e7d"
checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4"
dependencies = [
"heck",
"proc-macro2",
@ -431,18 +429,18 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "serde"
version = "1.0.216"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.216"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
dependencies = [
"proc-macro2",
"quote",
@ -451,9 +449,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.134"
version = "1.0.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d"
checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
dependencies = [
"itoa",
"memchr",
@ -538,11 +536,10 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "ulid"
version = "1.1.3"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
checksum = "f294bff79170ed1c5633812aff1e565c35d993a36e757f9bc0accf5eec4e6045"
dependencies = [
"getrandom",
"rand",
"web-time",
]


@ -1 +0,0 @@
Update Alpine Linux Synapse Package Maintainer within installation.md.


@ -1 +0,0 @@
Added the `email.tlsname` config option. This allows specifying the domain name used to validate the SMTP server's TLS certificate separately from the `email.smtp_host` used to connect to it.


@ -1 +0,0 @@
Module developers will have access to the user ID of the requester when adding `check_username_for_spam` callbacks to `spam_checker_module_callbacks`. Contributed by Wilson@Pangea.chat.


@ -1 +0,0 @@
Fix a bug when rejecting a withdrawn invite with a third_party_rules module, where the invite would be stuck for the client.


@ -1,3 +0,0 @@
Add endpoints to Admin API to fetch the number of invites the provided user has sent after a given timestamp,
fetch the number of rooms the provided user has joined after a given timestamp, and get report IDs of event
reports against a provided user (i.e. where the user was the sender of the reported event).


@ -1 +0,0 @@
Update `synapse.app.generic_worker` documentation to only recommend `GET` requests for stream writer routes by default, unless the worker is also configured as a stream writer. Contributed by @evoL.


@ -1 +0,0 @@
Support stable account suspension from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823).


@ -1 +0,0 @@
Add the previously-undocumented `last_seen_ts` field to the query user admin API.


@ -1 +0,0 @@
Add `macaroon_secret_key_path` config option.


@ -1 +0,0 @@
Improve documentation for the `TaskScheduler` class.


@ -1 +0,0 @@
Fix example in reverse proxy docs to include server port.


@ -1 +0,0 @@
Add `RoomID` & `EventID` rust types.


@ -1 +0,0 @@
Fix various type errors across the codebase.


@ -1 +0,0 @@
Bump mypy from 1.11.2 to 1.12.1.


@ -1 +0,0 @@
Disable the DB statement timeout when purging a room, since the purge can take a long time.


@ -1 +0,0 @@
Remove some remaining uses of `twisted.internet.defer.returnValue`. Contributed by Colin Watson.


@ -1 +0,0 @@
Properly purge state groups tables when purging a room with the admin API.


@ -1 +0,0 @@
Fix a bug preventing the admin redaction endpoint from working on messages from remote users.


@ -1 +0,0 @@
Remove support for PostgreSQL 11 and 12. Contributed by @clokep.


@ -1 +0,0 @@
Fix a bug preventing the admin redaction endpoint from working on messages from remote users.


@ -1 +0,0 @@
Refactor `get_profile` to no longer include fields with a value of `None`.


@ -51,7 +51,7 @@ services:
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
-    image: docker.io/postgres:12-alpine
+    image: docker.io/postgres:15-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse

debian/changelog (vendored): 30 changed lines

@ -1,3 +1,33 @@
matrix-synapse-py3 (1.124.0~rc1) stable; urgency=medium

  * New Synapse release 1.124.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Feb 2025 11:53:05 +0000

matrix-synapse-py3 (1.123.0) stable; urgency=medium

  * New Synapse release 1.123.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Jan 2025 08:37:34 -0700

matrix-synapse-py3 (1.123.0~rc1) stable; urgency=medium

  * New Synapse release 1.123.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Jan 2025 14:39:57 +0100

matrix-synapse-py3 (1.122.0) stable; urgency=medium

  * New Synapse release 1.122.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Jan 2025 14:14:14 +0000

matrix-synapse-py3 (1.122.0~rc1) stable; urgency=medium

  * New Synapse release 1.122.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Jan 2025 14:06:19 +0000

matrix-synapse-py3 (1.121.1) stable; urgency=medium

  * New Synapse release 1.121.1.


@ -85,6 +85,14 @@ rc_invites:
  per_user:
    per_second: 1000
    burst_count: 1000
  per_issuer:
    per_second: 1000
    burst_count: 1000

rc_presence:
  per_user:
    per_second: 9999
    burst_count: 9999

federation_rr_transactions_per_room_per_second: 9999


@ -385,6 +385,13 @@ The API is:
GET /_synapse/admin/v1/rooms/<room_id>/state
```
**Parameters**
The following query parameter is available:
* `type` - The type of room state event to filter by, e.g. "m.room.create". If provided, only state events
of this type will be returned (regardless of their `state_key` value).
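
As an illustration, the filter can be exercised with any HTTP client. Below is a hedged Python sketch (the homeserver URL, room ID, and `ADMIN_TOKEN` are placeholders, and the `{"state": [...]}` response shape is assumed from these docs):

```python
# Illustrative only: fetch a room's state via the Admin API, filtered to a
# single event type. URL, room ID, and ADMIN_TOKEN are placeholders.
import requests

resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v1/rooms/!abcd:example.com/state",
    params={"type": "m.room.member"},  # only return m.room.member state events
    headers={"Authorization": "Bearer ADMIN_TOKEN"},
)
resp.raise_for_status()
for event in resp.json()["state"]:  # assumed response shape
    print(event["type"], event.get("state_key"))
```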
A response body like the following is returned:
```json

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -1868,6 +1868,27 @@ rc_federation:
concurrent: 5
```
---
### `rc_presence`
This option sets ratelimiting for presence.
The `rc_presence.per_user` option sets rate limits on how often a specific
user's presence updates are evaluated. Ratelimited presence updates sent via sync are
ignored, and no error is returned to the client.
This option also sets the rate limit for the
[`PUT /_matrix/client/v3/presence/{userId}/status`](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3presenceuseridstatus)
endpoint.
`per_user` defaults to `per_second: 0.1`, `burst_count: 1`.
Example configuration:
```yaml
rc_presence:
  per_user:
    per_second: 0.05
    burst_count: 0.5
```
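
For intuition, `per_second` and `burst_count` behave like a token bucket: the bucket holds at most `burst_count` tokens, refills at `per_second` tokens per second, and each evaluated presence update spends one token. A minimal sketch of that model (illustrative only, not Synapse's actual `Ratelimiter`):

```python
import time

# Token-bucket sketch of per_second/burst_count semantics (illustrative).
class TokenBucket:
    def __init__(self, per_second: float, burst_count: float) -> None:
        self.rate = per_second
        self.capacity = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill at `per_second` tokens/second, capped at `burst_count`.
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False

bucket = TokenBucket(per_second=0.1, burst_count=1)  # the documented defaults
print(bucket.allow())  # True; a second update within ~10 seconds would be dropped
```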
---
### `federation_rr_transactions_per_room_per_second`
Sets outgoing federation transaction frequency for sending read-receipts,
@ -4465,6 +4486,10 @@ instance_map:
  worker1:
    host: localhost
    port: 8034
  other:
    host: localhost
    port: 8035
    tls: true
```
Example configuration (#2, for UNIX sockets):
```yaml

poetry.lock (generated): 262 changed lines

@ -842,13 +842,13 @@ trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
version = "3.1.4"
version = "3.1.5"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
{file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
{file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
{file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
{file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
]
[package.dependencies]
@ -1314,43 +1314,43 @@ files = [
[[package]]
name = "mypy"
version = "1.12.1"
version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3d7d4371829184e22fda4015278fbfdef0327a4b955a483012bd2d423a788801"},
{file = "mypy-1.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f59f1dfbf497d473201356966e353ef09d4daec48caeacc0254db8ef633a28a5"},
{file = "mypy-1.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b947097fae68004b8328c55161ac9db7d3566abfef72d9d41b47a021c2fba6b1"},
{file = "mypy-1.12.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:96af62050971c5241afb4701c15189ea9507db89ad07794a4ee7b4e092dc0627"},
{file = "mypy-1.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:d90da248f4c2dba6c44ddcfea94bb361e491962f05f41990ff24dbd09969ce20"},
{file = "mypy-1.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1230048fec1380faf240be6385e709c8570604d2d27ec6ca7e573e3bc09c3735"},
{file = "mypy-1.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:02dcfe270c6ea13338210908f8cadc8d31af0f04cee8ca996438fe6a97b4ec66"},
{file = "mypy-1.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5a437c9102a6a252d9e3a63edc191a3aed5f2fcb786d614722ee3f4472e33f6"},
{file = "mypy-1.12.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:186e0c8346efc027ee1f9acf5ca734425fc4f7dc2b60144f0fbe27cc19dc7931"},
{file = "mypy-1.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:673ba1140a478b50e6d265c03391702fa11a5c5aff3f54d69a62a48da32cb811"},
{file = "mypy-1.12.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9fb83a7be97c498176fb7486cafbb81decccaef1ac339d837c377b0ce3743a7f"},
{file = "mypy-1.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:389e307e333879c571029d5b93932cf838b811d3f5395ed1ad05086b52148fb0"},
{file = "mypy-1.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94b2048a95a21f7a9ebc9fbd075a4fcd310410d078aa0228dbbad7f71335e042"},
{file = "mypy-1.12.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ee5932370ccf7ebf83f79d1c157a5929d7ea36313027b0d70a488493dc1b179"},
{file = "mypy-1.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:19bf51f87a295e7ab2894f1d8167622b063492d754e69c3c2fed6563268cb42a"},
{file = "mypy-1.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d34167d43613ffb1d6c6cdc0cc043bb106cac0aa5d6a4171f77ab92a3c758bcc"},
{file = "mypy-1.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:427878aa54f2e2c5d8db31fa9010c599ed9f994b3b49e64ae9cd9990c40bd635"},
{file = "mypy-1.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5fcde63ea2c9f69d6be859a1e6dd35955e87fa81de95bc240143cf00de1f7f81"},
{file = "mypy-1.12.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d54d840f6c052929f4a3d2aab2066af0f45a020b085fe0e40d4583db52aab4e4"},
{file = "mypy-1.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:20db6eb1ca3d1de8ece00033b12f793f1ea9da767334b7e8c626a4872090cf02"},
{file = "mypy-1.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b16fe09f9c741d85a2e3b14a5257a27a4f4886c171d562bc5a5e90d8591906b8"},
{file = "mypy-1.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0dcc1e843d58f444fce19da4cce5bd35c282d4bde232acdeca8279523087088a"},
{file = "mypy-1.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e10ba7de5c616e44ad21005fa13450cd0de7caaa303a626147d45307492e4f2d"},
{file = "mypy-1.12.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e6fe449223fa59fbee351db32283838a8fee8059e0028e9e6494a03802b4004"},
{file = "mypy-1.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:dc6e2a2195a290a7fd5bac3e60b586d77fc88e986eba7feced8b778c373f9afe"},
{file = "mypy-1.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:de5b2a8988b4e1269a98beaf0e7cc71b510d050dce80c343b53b4955fff45f19"},
{file = "mypy-1.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:843826966f1d65925e8b50d2b483065c51fc16dc5d72647e0236aae51dc8d77e"},
{file = "mypy-1.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9fe20f89da41a95e14c34b1ddb09c80262edcc295ad891f22cc4b60013e8f78d"},
{file = "mypy-1.12.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8135ffec02121a75f75dc97c81af7c14aa4ae0dda277132cfcd6abcd21551bfd"},
{file = "mypy-1.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:a7b76fa83260824300cc4834a3ab93180db19876bce59af921467fd03e692810"},
{file = "mypy-1.12.1-py3-none-any.whl", hash = "sha256:ce561a09e3bb9863ab77edf29ae3a50e65685ad74bba1431278185b7e5d5486e"},
{file = "mypy-1.12.1.tar.gz", hash = "sha256:f5b3936f7a6d0e8280c9bdef94c7ce4847f5cdfc258fbb2c29a8c1711e8bb96d"},
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
@ -1360,6 +1360,7 @@ typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
faster-cache = ["orjson"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
@ -1459,93 +1460,89 @@ files = [
[[package]]
name = "pillow"
version = "11.0.0"
version = "11.1.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.9"
files = [
{file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"},
{file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"},
{file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"},
{file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"},
{file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"},
{file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"},
{file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"},
{file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"},
{file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"},
{file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"},
{file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"},
{file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"},
{file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"},
{file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"},
{file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"},
{file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"},
{file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"},
{file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"},
{file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"},
{file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"},
{file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"},
{file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"},
{file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"},
{file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"},
{file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"},
{file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"},
{file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"},
{file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"},
{file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"},
{file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"},
{file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"},
{file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"},
{file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"},
{file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"},
{file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"},
{file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"},
{file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"},
{file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"},
{file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"},
{file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"},
{file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"},
{file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"},
{file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"},
{file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"},
{file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"},
{file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"},
{file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"},
{file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"},
{file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"},
{file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"},
{file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"},
{file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"},
{file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"},
{file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"},
{file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"},
{file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"},
{file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"},
{file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"},
{file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"},
{file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"},
{file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"},
{file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"},
{file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"},
{file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"},
{file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"},
{file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"},
{file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"},
{file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"},
{file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"},
{file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"},
{file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"},
{file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"},
{file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"},
{file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"},
{file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"},
{file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"},
{file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"},
{file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"},
{file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"},
{file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"},
{file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"},
{file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"},
{file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"},
{file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"},
{file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"},
{file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"},
{file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"},
{file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"},
{file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"},
{file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"},
{file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"},
{file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"},
{file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"},
{file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"},
{file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"},
{file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"},
{file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"},
{file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"},
{file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"},
{file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"},
{file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"},
{file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"},
{file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"},
{file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"},
{file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"},
{file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"},
{file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"},
{file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"},
{file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"},
{file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"},
{file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"},
{file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"},
{file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"},
{file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"},
{file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"},
{file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"},
{file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"},
{file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"},
{file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"},
{file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"},
{file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"},
{file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"},
{file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"},
{file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"},
{file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"},
{file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"},
{file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"},
{file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"},
{file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"},
{file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"},
{file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"},
{file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"},
{file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"},
{file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
fpx = ["olefile"]
mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
typing = ["typing-extensions"]
xmp = ["defusedxml"]
@ -1590,6 +1587,7 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@ -1825,12 +1823,12 @@ plugins = ["importlib-metadata"]
[[package]]
name = "pyicu"
version = "2.13.1"
version = "2.14"
description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
files = [
{file = "PyICU-2.13.1.tar.gz", hash = "sha256:d4919085eaa07da12bade8ee721e7bbf7ade0151ca0f82946a26c8f4b98cdceb"},
{file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
]
[[package]]
@ -1907,20 +1905,20 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
version = "24.2.1"
version = "24.3.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"},
{file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"},
{file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
{file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
]
[package.dependencies]
cryptography = ">=41.0.5,<44"
cryptography = ">=41.0.5,<45"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"]
[[package]]
@ -1962,13 +1960,13 @@ six = ">=1.5"
[[package]]
name = "python-multipart"
version = "0.0.18"
version = "0.0.20"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "python_multipart-0.0.18-py3-none-any.whl", hash = "sha256:efe91480f485f6a361427a541db4796f9e1591afc0fb8e7a4ba06bfbc6708996"},
{file = "python_multipart-0.0.18.tar.gz", hash = "sha256:7a68db60c8bfb82e460637fa4750727b45af1d5e2ed215593f917f64694d34fe"},
{file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"},
{file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"},
]
[[package]]
@ -2321,13 +2319,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.17.0"
version = "2.19.2"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
{file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"},
{file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"},
{file = "sentry_sdk-2.19.2-py2.py3-none-any.whl", hash = "sha256:ebdc08228b4d131128e568d696c210d846e5b9d70aa0327dec6b1272d9d40b84"},
{file = "sentry_sdk-2.19.2.tar.gz", hash = "sha256:467df6e126ba242d39952375dd816fbee0f217d119bf454a8ce74cf1e7909e8d"},
]
[package.dependencies]
@ -2353,14 +2351,16 @@ grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
http2 = ["httpcore[http2] (==1.*)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
huggingface-hub = ["huggingface-hub (>=0.22)"]
huggingface-hub = ["huggingface_hub (>=0.22)"]
langchain = ["langchain (>=0.0.210)"]
launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"]
litestar = ["litestar (>=2.0.0)"]
loguru = ["loguru (>=0.5)"]
openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
openfeature = ["openfeature-sdk (>=0.7.1)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
opentelemetry-experimental = ["opentelemetry-distro"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pure-eval = ["asttokens", "executing", "pure_eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
@ -2706,13 +2706,13 @@ twisted = "*"
[[package]]
name = "types-bleach"
version = "6.1.0.20240331"
version = "6.2.0.20241123"
description = "Typing stubs for bleach"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-bleach-6.1.0.20240331.tar.gz", hash = "sha256:2ee858a84fb06fc2225ff56ba2f7f6c88b65638659efae0d7bfd6b24a1b5a524"},
{file = "types_bleach-6.1.0.20240331-py3-none-any.whl", hash = "sha256:399bc59bfd20a36a56595f13f805e56c8a08e5a5c07903e5cf6fafb5a5107dd4"},
{file = "types_bleach-6.2.0.20241123-py3-none-any.whl", hash = "sha256:c6e58b3646665ca7c6b29890375390f4569e84f0cf5c171e0fe1ddb71a7be86a"},
{file = "types_bleach-6.2.0.20241123.tar.gz", hash = "sha256:dac5fe9015173514da3ac810c1a935619a3ccbcc5d66c4cbf4707eac00539057"},
]
[package.dependencies]


@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.121.1"
version = "1.124.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"


@ -174,6 +174,12 @@ class MSC3861DelegatedAuth(BaseAuth):
logger.warning("Failed to load metadata:", exc_info=True)
return None
async def auth_metadata(self) -> Dict[str, Any]:
"""
Returns the auth metadata dict
"""
return await self._issuer_metadata.get()
async def _introspection_endpoint(self) -> str:
"""
Returns the introspection endpoint of the issuer


@ -132,6 +132,10 @@ class Codes(str, Enum):
# connection.
UNKNOWN_POS = "M_UNKNOWN_POS"
# Part of MSC4133
PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
KEY_TOO_LARGE = "M_KEY_TOO_LARGE"
class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers.


@ -275,6 +275,7 @@ class Ratelimiter:
update: bool = True,
n_actions: int = 1,
_time_now_s: Optional[float] = None,
pause: Optional[float] = 0.5,
) -> None:
"""Checks if an action can be performed. If not, raises a LimitExceededError
@ -298,6 +299,8 @@ class Ratelimiter:
at all.
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.
pause: Time in seconds to pause when an action is being limited. Defaults to 0.5
to stop clients from "tight-looping" on retrying their request.
Raises:
LimitExceededError: If an action could not be performed, along with the time in
@ -316,9 +319,8 @@ class Ratelimiter:
)
if not allowed:
-            # We pause for a bit here to stop clients from "tight-looping" on
-            # retrying their request.
-            await self.clock.sleep(0.5)
+            if pause:
+                await self.clock.sleep(pause)
raise LimitExceededError(
limiter_name=self._limiter_name,
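
A minimal sketch of the new `pause` semantics (Synapse's `Clock` and `LimitExceededError` are replaced with stand-ins here; callers that cannot tolerate the delay would pass `pause=0` or `None`):

```python
import asyncio
from typing import Optional

async def check_rate_limited(allowed: bool, pause: Optional[float] = 0.5) -> None:
    # Stand-in for the Ratelimiter behaviour: when the action is limited,
    # sleep briefly before raising so clients don't tight-loop on retries.
    if not allowed:
        if pause:
            await asyncio.sleep(pause)
        raise RuntimeError("rate limited")  # stand-in for LimitExceededError

asyncio.run(check_rate_limited(True))  # an allowed action returns immediately
```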


@ -221,9 +221,13 @@ class Config:
The number of milliseconds in the duration.
Raises:
-            TypeError, if given something other than an integer or a string
+            TypeError: if given something other than an integer or a string, or the
+                duration is using an incorrect suffix.
+            ValueError: if given a string not of the form described above.
"""
# For integers, we prefer to use `type(value) is int` instead of
# `isinstance(value, int)` because we want to exclude subclasses of int, such as
# bool.
if type(value) is int: # noqa: E721
return value
elif isinstance(value, str):
@ -246,9 +250,20 @@ class Config:
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
elif suffix.isdigit():
# No suffix is treated as milliseconds.
value = value
size = 1
else:
raise TypeError(
f"Bad duration suffix {value} (expected no suffix or one of these suffixes: {sizes.keys()})"
)
return int(value) * size
else:
raise TypeError(f"Bad duration {value!r}")
raise TypeError(
f"Bad duration type {value!r} (expected int or string duration)"
)
@staticmethod
def abspath(file_path: str) -> str:
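
A self-contained sketch of the duration grammar this implements (the suffix table is assumed from Synapse's documented `s`/`m`/`h`/`d`/`w`/`y` units; not the exact implementation):

```python
# Hedged, standalone sketch: bare ints and digit-only strings are milliseconds;
# otherwise one trailing suffix letter scales the value. Suffix table assumed.
SIZES = {
    "s": 1000,
    "m": 60 * 1000,
    "h": 60 * 60 * 1000,
    "d": 24 * 60 * 60 * 1000,
    "w": 7 * 24 * 60 * 60 * 1000,
    "y": 365 * 24 * 60 * 60 * 1000,
}

def parse_duration(value):
    if type(value) is int:  # deliberately excludes bool, a subclass of int
        return value
    if isinstance(value, str):
        suffix = value[-1]
        if suffix in SIZES:
            return int(value[:-1]) * SIZES[suffix]
        if suffix.isdigit():
            return int(value)  # no suffix: already milliseconds
        raise TypeError(f"Bad duration suffix {value}")
    raise TypeError(f"Bad duration type {value!r}")

assert parse_duration("2s") == 2000 and parse_duration(250) == 250
```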


@ -436,8 +436,8 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
-        # MSC4151: Report room API (Client-Server API)
-        self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
+        # MSC4133: Custom profile fields
+        self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False)
# MSC4210: Remove legacy mentions
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)


@ -228,3 +228,9 @@ class RatelimitConfig(Config):
config.get("remote_media_download_burst_count", "500M")
),
)
self.rc_presence_per_user = RatelimitSettings.parse(
config,
"rc_presence.per_user",
defaults={"per_second": 0.1, "burst_count": 1},
)


@ -566,6 +566,7 @@ def _is_membership_change_allowed(
logger.debug(
"_is_membership_change_allowed: %s",
{
"caller_membership": caller.membership if caller else None,
"caller_in_room": caller_in_room,
"caller_invited": caller_invited,
"caller_knocked": caller_knocked,
@ -677,7 +678,8 @@ def _is_membership_change_allowed(
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
-            if not caller_in_room and not caller_invited:
+            # You can only join the room if you are invited or are already in the room.
+            if not (caller_in_room or caller_invited):
raise AuthError(403, "You are not invited to this room.")
else:
# TODO (erikj): may_join list


@ -42,7 +42,7 @@ import attr
from typing_extensions import Literal
from unpaddedbase64 import encode_base64
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.synapse_rust.events import EventInternalMetadata
from synapse.types import JsonDict, StrCollection
@ -325,12 +325,17 @@ class EventBase(metaclass=abc.ABCMeta):
def __repr__(self) -> str:
rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
conditional_membership_string = ""
if self.get("type") == EventTypes.Member:
conditional_membership_string = f"membership={self.membership}, "
return (
f"<{self.__class__.__name__} "
f"{rejection}"
f"event_id={self.event_id}, "
f"type={self.get('type')}, "
f"state_key={self.get('state_key')}, "
f"{conditional_membership_string}"
f"outlier={self.internal_metadata.is_outlier()}"
">"
)
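
With this change, the repr of a membership event looks something like `<FrozenEvent event_id=$abc, type=m.room.member, state_key=@alice:example.com, membership=join, outlier=False>` (illustrative values; `FrozenEvent` is one concrete `EventBase` subclass).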


@ -66,50 +66,67 @@ class InviteAutoAccepter:
event: The incoming event.
"""
# Check if the event is an invite for a local user.
-        is_invite_for_local_user = (
-            event.type == EventTypes.Member
-            and event.is_state()
-            and event.membership == Membership.INVITE
-            and self._api.is_mine(event.state_key)
-        )
+        if (
+            event.type != EventTypes.Member
+            or event.is_state() is False
+            or event.membership != Membership.INVITE
+            or self._api.is_mine(event.state_key) is False
+        ):
+            return

         # Only accept invites for direct messages if the configuration mandates it.
         is_direct_message = event.content.get("is_direct", False)
-        is_allowed_by_direct_message_rules = (
-            not self._config.accept_invites_only_for_direct_messages
-            or is_direct_message is True
-        )
+        if (
+            self._config.accept_invites_only_for_direct_messages
+            and is_direct_message is False
+        ):
+            return

         # Only accept invites from remote users if the configuration mandates it.
         is_from_local_user = self._api.is_mine(event.sender)
-        is_allowed_by_local_user_rules = (
-            not self._config.accept_invites_only_from_local_users
-            or is_from_local_user is True
-        )
+        if (
+            self._config.accept_invites_only_from_local_users
+            and is_from_local_user is False
+        ):
+            return

+        # Check the user is activated.
+        recipient = await self._api.get_userinfo_by_id(event.state_key)
+
+        # Ignore if the user doesn't exist.
+        if recipient is None:
+            return
+
+        # Never accept invites for deactivated users.
+        if recipient.is_deactivated:
+            return
+
+        # Never accept invites for suspended users.
+        if recipient.suspended:
+            return
+
+        # Never accept invites for locked users.
+        if recipient.locked:
+            return
+
+        # Make the user join the room. We run this as a background process to circumvent a race condition
+        # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
+        run_as_background_process(
+            "retry_make_join",
+            self._retry_make_join,
+            event.state_key,
+            event.state_key,
+            event.room_id,
+            "join",
+            bg_start_span=False,
+        )

-        if (
-            is_invite_for_local_user
-            and is_allowed_by_direct_message_rules
-            and is_allowed_by_local_user_rules
-        ):
-            # Make the user join the room. We run this as a background process to circumvent a race condition
-            # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
-            run_as_background_process(
-                "retry_make_join",
-                self._retry_make_join,
-                event.state_key,
-                event.state_key,
-                event.room_id,
-                "join",
-                bg_start_span=False,
-            )

+        if is_direct_message:
+            # Mark this room as a direct message!
+            await self._mark_room_as_direct_message(
+                event.state_key, event.sender, event.room_id
+            )
-            if is_direct_message:
-                # Mark this room as a direct message!
-                await self._mark_room_as_direct_message(
-                    event.state_key, event.sender, event.room_id
-                )
async def _mark_room_as_direct_message(
self, user_id: str, dm_user_id: str, room_id: str
) -> None:

View file

@@ -24,7 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import attr
from signedjson.types import SigningKey
from synapse.api.constants import MAX_DEPTH, EventTypes
from synapse.api.room_versions import (
KNOWN_EVENT_FORMAT_VERSIONS,
EventFormatVersions,
@@ -109,6 +109,19 @@ class EventBuilder:
def is_state(self) -> bool:
return self._state_key is not None
def is_mine_id(self, user_id: str) -> bool:
"""Determines whether a user ID or room alias originates from this homeserver.
Returns:
`True` if the hostname part of the user ID or room alias matches this
homeserver.
`False` otherwise, or if the user ID or room alias is malformed.
"""
localpart_hostname = user_id.split(":", 1)
if len(localpart_hostname) < 2:
return False
return localpart_hostname[1] == self._hostname
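    # Illustration only, not part of this change: `split(":", 1)` splits on the
    # first colon, so ports and any later colons stay attached to the hostname
    # part (the IDs below are made up).
    assert "@alice:example.com".split(":", 1) == ["@alice", "example.com"]
    assert "@alice:example.com:8448".split(":", 1) == ["@alice", "example.com:8448"]
    assert len("not-a-user-id".split(":", 1)) < 2  # malformed, so is_mine_id is False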
async def build(
self,
prev_event_ids: List[str],
@@ -142,6 +155,46 @@
self, state_ids
)
# Check for out-of-band membership that may have been exposed on `/sync` but
# the events have not been de-outliered yet so they won't be part of the
# room state yet.
#
# This helps in situations where a remote homeserver invites a local user to
# a room that we're already participating in; and we've persisted the invite
# as an out-of-band membership (outlier), but it hasn't been pushed to us as
# part of a `/send` transaction yet and de-outliered. This also helps for
# any of the other out-of-band membership transitions.
#
# As an optimization, we could check if the room state already includes a
# non-`leave` membership event, then we can assume the membership event has
# been de-outliered and we don't need to check for an out-of-band
# membership. But we don't have the necessary information from a
# `StateMap[str]` and we'll just have to take the hit of this extra lookup
# for any membership event for now.
if self.type == EventTypes.Member and self.is_mine_id(self.state_key):
(
_membership,
member_event_id,
) = await self._store.get_local_current_membership_for_user_in_room(
user_id=self.state_key,
room_id=self.room_id,
)
# There is no need to check if the membership is actually an
# out-of-band membership (`outlier`) as we would end up with the
# same result either way (adding the member event to the
# `auth_event_ids`).
if (
member_event_id is not None
# We only need to be careful about duplicating the event in the
# `auth_event_ids` list (duplicate `type`/`state_key` is part of the
# authorization rules)
and member_event_id not in auth_event_ids
):
auth_event_ids.append(member_event_id)
# Also make sure to point to the previous membership event that will
# allow this one to happen so the computed state works out.
prev_event_ids.append(member_event_id)
format_version = self.room_version.event_format
# The types of auth/prev events changes between event versions.
prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]]

View file

@@ -151,6 +151,8 @@ class FederationEventHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._store = hs.get_datastores().main
self._state_store = hs.get_datastores().state
self._state_deletion_store = hs.get_datastores().state_deletion
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
@@ -580,7 +582,9 @@ class FederationEventHandler:
room_version.identifier,
state_maps_to_resolve,
event_map=None,
state_res_store=StateResolutionStore(
self._store, self._state_deletion_store
),
)
)
else:
@@ -1179,7 +1183,9 @@ class FederationEventHandler:
room_version,
state_maps,
event_map={event_id: event},
state_res_store=StateResolutionStore(
self._store, self._state_deletion_store
),
)
except Exception as e:
@@ -1874,7 +1880,9 @@ class FederationEventHandler:
room_version,
[local_state_id_map, claimed_auth_events_id_map],
event_map=None,
state_res_store=StateResolutionStore(
self._store, self._state_deletion_store
),
)
)
else:
@@ -2014,7 +2022,9 @@ class FederationEventHandler:
room_version,
state_sets,
event_map=None,
state_res_store=StateResolutionStore(
self._store, self._state_deletion_store
),
)
)
else:
@@ -2272,8 +2282,9 @@ class FederationEventHandler:
event_and_contexts, backfilled=backfilled
)
# After persistence, we never notify clients (wake up `/sync` streams) about
# backfilled events but it's important to let all the workers know about any
# new event (backfilled or not) because TODO
self._notifier.notify_replication()
if self._ephemeral_messages_enabled:

View file

@@ -1002,7 +1002,21 @@ class OidcProvider:
"""
state = generate_token()
# Generate a nonce 32 characters long. When encoded with base64url later on,
# the nonce will be 43 characters when sent to the identity provider.
#
# While RFC7636 does not specify a minimum length for the `nonce`
# parameter, the TI-Messenger IDP_FD spec v1.7.3 does require it to be
# between 43 and 128 characters. This spec concerns using Matrix for
# communication in German healthcare.
#
# As increasing the length only strengthens security, we use this length
# to allow TI-Messenger deployments using Synapse to satisfy this
# external spec.
#
# See https://github.com/element-hq/synapse/pull/18109 for more context.
nonce = generate_token(length=32)
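        # Illustrative sanity check of the arithmetic above (not part of the
        # original change): assuming the 32-character token is ASCII (so 32
        # bytes), base64url encodes 6 bits per character, giving
        # ceil(32 * 8 / 6) = 43 characters once the "=" padding is stripped.
        import base64
        import secrets

        assert len(base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=")) == 43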
code_verifier = ""
if not client_redirect_url:

View file

@@ -32,7 +32,7 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester
from synapse.util.caches.descriptors import cached
from synapse.util.stringutils import parse_and_validate_mxc_uri
@@ -43,6 +43,8 @@ logger = logging.getLogger(__name__)
MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
# Field name length is specced at 255 bytes.
MAX_CUSTOM_FIELD_LEN = 255
class ProfileHandler:
@@ -90,7 +92,15 @@ class ProfileHandler:
if self.hs.is_mine(target_user):
profileinfo = await self.store.get_profileinfo(target_user)
extra_fields = {}
if self.hs.config.experimental.msc4133_enabled:
extra_fields = await self.store.get_profile_fields(target_user)
if (
profileinfo.display_name is None
and profileinfo.avatar_url is None
and not extra_fields
):
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
# Do not include display name or avatar if unset.
@@ -99,6 +109,9 @@ class ProfileHandler:
ret[ProfileFields.DISPLAYNAME] = profileinfo.display_name
if profileinfo.avatar_url is not None:
ret[ProfileFields.AVATAR_URL] = profileinfo.avatar_url
if extra_fields:
ret.update(extra_fields)
return ret
else:
try:
@@ -403,6 +416,110 @@ class ProfileHandler:
return True
async def get_profile_field(
self, target_user: UserID, field_name: str
) -> JsonValue:
"""
Fetch a user's profile from the database for local users and over federation
for remote users.
Args:
target_user: The user ID to fetch the profile for.
            field_name: The profile field to fetch.
Returns:
The value for the profile field or None if the field does not exist.
"""
if self.hs.is_mine(target_user):
try:
field_value = await self.store.get_profile_field(
target_user, field_name
)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
raise
return field_value
else:
try:
result = await self.federation.make_query(
destination=target_user.domain,
query_type="profile",
args={"user_id": target_user.to_string(), "field": field_name},
ignore_backoff=True,
)
except RequestSendFailed as e:
raise SynapseError(502, "Failed to fetch profile") from e
except HttpResponseException as e:
raise e.to_synapse_error()
return result.get(field_name)
async def set_profile_field(
self,
target_user: UserID,
requester: Requester,
field_name: str,
new_value: JsonValue,
by_admin: bool = False,
deactivation: bool = False,
) -> None:
"""Set a new profile field for a user.
Args:
target_user: the user whose profile is to be changed.
requester: The user attempting to make this change.
field_name: The name of the profile field to update.
new_value: The new field value for this user.
by_admin: Whether this change was made by an administrator.
deactivation: Whether this change was made while deactivating the user.
"""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this homeserver")
if not by_admin and target_user != requester.user:
raise AuthError(403, "Cannot set another user's profile")
await self.store.set_profile_field(target_user, field_name, new_value)
# Custom fields do not propagate into the user directory *or* rooms.
profile = await self.store.get_profileinfo(target_user)
await self._third_party_rules.on_profile_update(
target_user.to_string(), profile, by_admin, deactivation
)
async def delete_profile_field(
self,
target_user: UserID,
requester: Requester,
field_name: str,
by_admin: bool = False,
deactivation: bool = False,
) -> None:
"""Delete a field from a user's profile.
Args:
target_user: the user whose profile is to be changed.
requester: The user attempting to make this change.
field_name: The name of the profile field to remove.
by_admin: Whether this change was made by an administrator.
deactivation: Whether this change was made while deactivating the user.
"""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this homeserver")
if not by_admin and target_user != requester.user:
            raise AuthError(403, "Cannot delete another user's profile")
await self.store.delete_profile_field(target_user, field_name)
# Custom fields do not propagate into the user directory *or* rooms.
profile = await self.store.get_profileinfo(target_user)
await self._third_party_rules.on_profile_update(
target_user.to_string(), profile, by_admin, deactivation
)
async def on_profile_query(self, args: JsonDict) -> JsonDict:
"""Handles federation profile query requests."""
@@ -419,13 +536,24 @@ class ProfileHandler:
just_field = args.get("field", None)
response: JsonDict = {}
try:
if just_field is None or just_field == ProfileFields.DISPLAYNAME:
response["displayname"] = await self.store.get_profile_displayname(user)
if just_field is None or just_field == ProfileFields.AVATAR_URL:
response["avatar_url"] = await self.store.get_profile_avatar_url(user)
if self.hs.config.experimental.msc4133_enabled:
if just_field is None:
response.update(await self.store.get_profile_fields(user))
elif just_field not in (
ProfileFields.DISPLAYNAME,
ProfileFields.AVATAR_URL,
):
response[just_field] = await self.store.get_profile_field(
user, just_field
)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
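# Illustration only: hypothetical request/response shapes for the federation
# profile query handled above (every value here is made up). Under MSC4133,
# naming a single custom field returns just that key:
example_query_args = {"user_id": "@alice:example.com", "field": "uk.tcpip.msc4133.example"}
example_response = {"uk.tcpip.msc4133.example": "some value"}
# Omitting "field" returns displayname, avatar_url and all custom fields at once.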

View file

@@ -29,7 +29,7 @@ from synapse.rest.client import (
account_validity,
appservice_ping,
auth,
auth_metadata,
capabilities,
delayed_events,
devices,
@@ -121,7 +121,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = (
mutual_rooms.register_servlets,
login_token_request.register_servlets,
rendezvous.register_servlets,
auth_metadata.register_servlets,
)
SERVLET_GROUPS: Dict[str, Iterable[RegisterServletsFunc]] = {
@@ -187,7 +187,7 @@ class ClientRestResource(JsonResource):
mutual_rooms.register_servlets,
login_token_request.register_servlets,
rendezvous.register_servlets,
auth_metadata.register_servlets,
]:
continue

View file

@@ -23,6 +23,7 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
import attr
from immutabledict import immutabledict
from synapse.api.constants import Direction, EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
@@ -463,7 +464,18 @@ class RoomStateRestServlet(RestServlet):
if not room:
raise NotFoundError("Room not found")
state_filter = None
type = parse_string(request, "type")
if type:
state_filter = StateFilter(
types=immutabledict({type: None}),
include_others=False,
)
event_ids = await self._storage_controllers.state.get_current_state_ids(
room_id, state_filter
)
events = await self.store.get_events(event_ids.values())
now = self.clock.time_msec()
room_state = await self._event_serializer.serialize_events(events.values(), now)

View file

@@ -32,6 +32,8 @@ logger = logging.getLogger(__name__)
class AuthIssuerServlet(RestServlet):
"""
Advertises what OpenID Connect issuer clients should use to authorise users.
This endpoint was defined in a previous iteration of MSC2965, and is still
used by some clients.
"""
PATTERNS = client_patterns(
@@ -63,7 +65,42 @@
)
class AuthMetadataServlet(RestServlet):
"""
Advertises the OAuth 2.0 server metadata for the homeserver.
"""
PATTERNS = client_patterns(
"/org.matrix.msc2965/auth_metadata$",
unstable=True,
releases=(),
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self._config = hs.config
self._auth = hs.get_auth()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self._config.experimental.msc3861.enabled:
# If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth
# We import lazily here because of the authlib requirement
from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
auth = cast(MSC3861DelegatedAuth, self._auth)
return 200, await auth.auth_metadata()
else:
# Wouldn't expect this to be reached: the servlet shouldn't have been
# registered. Still, fail gracefully if we are registered for some reason.
raise SynapseError(
404,
"OIDC discovery has not been configured on this homeserver",
Codes.NOT_FOUND,
)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
# We use the MSC3861 values as they are used by multiple MSCs
if hs.config.experimental.msc3861.enabled:
AuthIssuerServlet(hs).register(http_server)
AuthMetadataServlet(hs).register(http_server)

View file

@@ -92,6 +92,23 @@ class CapabilitiesRestServlet(RestServlet):
"enabled": self.config.experimental.msc3664_enabled,
}
if self.config.experimental.msc4133_enabled:
response["capabilities"]["uk.tcpip.msc4133.profile_fields"] = {
"enabled": True,
}
# Ensure this is consistent with the legacy m.set_displayname and
# m.set_avatar_url.
disallowed = []
if not self.config.registration.enable_set_displayname:
disallowed.append("displayname")
if not self.config.registration.enable_set_avatar_url:
disallowed.append("avatar_url")
if disallowed:
response["capabilities"]["uk.tcpip.msc4133.profile_fields"][
"disallowed"
] = disallowed
return HTTPStatus.OK, response
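# Illustration only (a made-up configuration, not part of this change): a
# server with `enable_set_displayname: false` in its registration config would
# advertise the MSC4133 capability roughly as below; clients are expected to
# stop offering edits for the listed fields.
example_capability = {
    "uk.tcpip.msc4133.profile_fields": {
        "enabled": True,
        "disallowed": ["displayname"],
    }
}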

View file

@@ -24,7 +24,8 @@
import logging
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.handlers.presence import format_user_presence_state
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -48,6 +49,14 @@ class PresenceStatusRestServlet(RestServlet):
self.presence_handler = hs.get_presence_handler()
self.clock = hs.get_clock()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
# Ratelimiter for presence updates, keyed by requester.
self._presence_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
cfg=hs.config.ratelimiting.rc_presence_per_user,
)
async def on_GET(
self, request: SynapseRequest, user_id: str
@@ -82,6 +91,17 @@ class PresenceStatusRestServlet(RestServlet):
if requester.user != user:
raise AuthError(403, "Can only set your own presence state")
# ignore the presence update if the ratelimit is exceeded
try:
await self._presence_per_user_limiter.ratelimit(requester)
except LimitExceededError as e:
logger.debug("User presence ratelimit exceeded; ignoring it.")
return 429, {
"errcode": Codes.LIMIT_EXCEEDED,
"error": "Too many requests",
"retry_after_ms": e.retry_after_ms,
}
state = {}
content = parse_json_object_from_request(request)

View file

@@ -21,10 +21,13 @@
"""This module contains REST servlets to do with profile: /profile/<paths>"""
import re
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse.api.constants import ProfileFields
from synapse.api.errors import Codes, SynapseError
from synapse.handlers.profile import MAX_CUSTOM_FIELD_LEN
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@@ -33,7 +36,8 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.types import JsonDict, JsonValue, UserID
from synapse.util.stringutils import is_namedspaced_grammar
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -91,6 +95,11 @@ class ProfileDisplaynameRestServlet(RestServlet):
async def on_PUT(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
if not UserID.is_valid(user_id):
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM
)
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester)
@@ -101,9 +110,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
new_name = content["displayname"]
except Exception:
raise SynapseError(
400, "Missing key 'displayname'", errcode=Codes.MISSING_PARAM
)
propagate = _read_propagate(self.hs, request)
@@ -166,6 +173,11 @@ class ProfileAvatarURLRestServlet(RestServlet):
async def on_PUT(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
if not UserID.is_valid(user_id):
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM
)
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester)
@@ -232,7 +244,180 @@ class ProfileRestServlet(RestServlet):
return 200, ret
class UnstableProfileFieldRestServlet(RestServlet):
PATTERNS = [
re.compile(
r"^/_matrix/client/unstable/uk\.tcpip\.msc4133/profile/(?P<user_id>[^/]*)/(?P<field_name>[^/]*)"
)
]
CATEGORY = "Event sending requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
self.auth = hs.get_auth()
async def on_GET(
self, request: SynapseRequest, user_id: str, field_name: str
) -> Tuple[int, JsonDict]:
requester_user = None
if self.hs.config.server.require_auth_for_profile_requests:
requester = await self.auth.get_user_by_req(request)
requester_user = requester.user
if not UserID.is_valid(user_id):
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM
)
if not field_name:
raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM)
if len(field_name.encode("utf-8")) > MAX_CUSTOM_FIELD_LEN:
raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE)
if not is_namedspaced_grammar(field_name):
raise SynapseError(
400,
"Field name does not follow Common Namespaced Identifier Grammar",
errcode=Codes.INVALID_PARAM,
)
user = UserID.from_string(user_id)
await self.profile_handler.check_profile_query_allowed(user, requester_user)
if field_name == ProfileFields.DISPLAYNAME:
field_value: JsonValue = await self.profile_handler.get_displayname(user)
elif field_name == ProfileFields.AVATAR_URL:
field_value = await self.profile_handler.get_avatar_url(user)
else:
field_value = await self.profile_handler.get_profile_field(user, field_name)
return 200, {field_name: field_value}
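# Standalone sketch, not Synapse's implementation: the three field-name checks
# above amount to something like the following. The regex reflects our reading
# of the Matrix spec's Common Namespaced Identifier Grammar; the real
# `is_namedspaced_grammar` lives in synapse.util.stringutils, outside this diff.
import re

_NAMESPACED = re.compile(r"[a-z][a-z0-9._-]{0,254}")

def _field_name_ok(field_name: str) -> bool:
    # Non-empty, at most 255 bytes when UTF-8 encoded, and namespaced.
    return (
        bool(field_name)
        and len(field_name.encode("utf-8")) <= 255
        and _NAMESPACED.fullmatch(field_name) is not None
    )

assert _field_name_ok("uk.tcpip.msc4133.example")
assert not _field_name_ok("Not A Valid Field!")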
async def on_PUT(
self, request: SynapseRequest, user_id: str, field_name: str
) -> Tuple[int, JsonDict]:
if not UserID.is_valid(user_id):
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM
)
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester)
if not field_name:
raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM)
if len(field_name.encode("utf-8")) > MAX_CUSTOM_FIELD_LEN:
raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE)
if not is_namedspaced_grammar(field_name):
raise SynapseError(
400,
"Field name does not follow Common Namespaced Identifier Grammar",
errcode=Codes.INVALID_PARAM,
)
content = parse_json_object_from_request(request)
try:
new_value = content[field_name]
except KeyError:
raise SynapseError(
400, f"Missing key '{field_name}'", errcode=Codes.MISSING_PARAM
)
propagate = _read_propagate(self.hs, request)
requester_suspended = (
await self.hs.get_datastores().main.get_user_suspended_status(
requester.user.to_string()
)
)
if requester_suspended:
raise SynapseError(
403,
"Updating profile while account is suspended is not allowed.",
Codes.USER_ACCOUNT_SUSPENDED,
)
if field_name == ProfileFields.DISPLAYNAME:
await self.profile_handler.set_displayname(
user, requester, new_value, is_admin, propagate=propagate
)
elif field_name == ProfileFields.AVATAR_URL:
await self.profile_handler.set_avatar_url(
user, requester, new_value, is_admin, propagate=propagate
)
else:
await self.profile_handler.set_profile_field(
user, requester, field_name, new_value, is_admin
)
return 200, {}
async def on_DELETE(
self, request: SynapseRequest, user_id: str, field_name: str
) -> Tuple[int, JsonDict]:
if not UserID.is_valid(user_id):
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM
)
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
is_admin = await self.auth.is_server_admin(requester)
if not field_name:
raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM)
if len(field_name.encode("utf-8")) > MAX_CUSTOM_FIELD_LEN:
raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE)
if not is_namedspaced_grammar(field_name):
raise SynapseError(
400,
"Field name does not follow Common Namespaced Identifier Grammar",
errcode=Codes.INVALID_PARAM,
)
propagate = _read_propagate(self.hs, request)
requester_suspended = (
await self.hs.get_datastores().main.get_user_suspended_status(
requester.user.to_string()
)
)
if requester_suspended:
raise SynapseError(
403,
"Updating profile while account is suspended is not allowed.",
Codes.USER_ACCOUNT_SUSPENDED,
)
if field_name == ProfileFields.DISPLAYNAME:
await self.profile_handler.set_displayname(
user, requester, "", is_admin, propagate=propagate
)
elif field_name == ProfileFields.AVATAR_URL:
await self.profile_handler.set_avatar_url(
user, requester, "", is_admin, propagate=propagate
)
else:
await self.profile_handler.delete_profile_field(
user, requester, field_name, is_admin
)
return 200, {}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
# The specific displayname / avatar URL / custom field endpoints *must* appear
# before their corresponding generic profile endpoint.
ProfileDisplaynameRestServlet(hs).register(http_server)
ProfileAvatarURLRestServlet(hs).register(http_server)
ProfileRestServlet(hs).register(http_server)
if hs.config.experimental.msc4133_enabled:
UnstableProfileFieldRestServlet(hs).register(http_server)

View file

@@ -20,13 +20,11 @@
#
import logging
import re
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse._pydantic_compat import StrictStr
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.urls import CLIENT_API_PREFIX
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@@ -127,16 +125,6 @@ class ReportRoomRestServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
# TODO: Remove the unstable variant after 2-3 releases
# https://github.com/element-hq/synapse/issues/17373
if hs.config.experimental.msc4151_enabled:
self.PATTERNS.append(
re.compile(
f"^{CLIENT_API_PREFIX}/unstable/org.matrix.msc4151"
"/rooms/(?P<room_id>[^/]*)/report$"
)
)
class PostBody(RequestBodyModel):
reason: StrictStr

View file

@@ -24,9 +24,10 @@ from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.ratelimiting import Ratelimiter
from synapse.events.utils import (
SerializeEventConfig,
format_event_for_client_v2_without_room_id,
@@ -126,6 +127,13 @@ class SyncRestServlet(RestServlet):
cache_name="sync_valid_filter",
)
# Ratelimiter for presence updates, keyed by requester.
self._presence_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
cfg=hs.config.ratelimiting.rc_presence_per_user,
)
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
assert request.args is not None
@@ -239,7 +247,14 @@ class SyncRestServlet(RestServlet):
# send any outstanding server notices to the user.
await self._server_notices_sender.on_user_syncing(user.to_string())
# ignore the presence update if the ratelimit is exceeded but do not pause the request
try:
await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
except LimitExceededError:
affect_presence = False
logger.debug("User set_presence ratelimit exceeded; ignoring it.")
else:
affect_presence = set_presence != PresenceState.OFFLINE
context = await self.presence_handler.user_syncing(
user.to_string(),

View file

@@ -170,10 +170,10 @@ class VersionsRestServlet(RestServlet):
),
# MSC4140: Delayed events
"org.matrix.msc4140": bool(self.config.server.max_event_delay_ms),
# MSC4151: Report room API (Client-Server API)
"org.matrix.msc4151": self.config.experimental.msc4151_enabled,
# Simplified sliding sync
"org.matrix.simplified_msc3575": msc3575_enabled,
# Arbitrary key-value profile fields.
"uk.tcpip.msc4133": self.config.experimental.msc4133_enabled,
},
},
)

View file

@@ -391,7 +391,7 @@ class HomeServer(metaclass=abc.ABCMeta):
def is_mine(self, domain_specific_string: DomainSpecificString) -> bool:
return domain_specific_string.domain == self.hostname
def is_mine_id(self, user_id: str) -> bool:
"""Determines whether a user ID or room alias originates from this homeserver.
Returns:
@@ -399,7 +399,7 @@ class HomeServer(metaclass=abc.ABCMeta):
homeserver.
`False` otherwise, or if the user ID or room alias is malformed.
"""
localpart_hostname = user_id.split(":", 1)
if len(localpart_hostname) < 2:
return False
return localpart_hostname[1] == self.hostname

View file

@@ -59,11 +59,13 @@ from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import Measure, measure_func
from synapse.util.stringutils import shortstr
if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.controllers import StateStorageController
from synapse.storage.databases.main import DataStore
from synapse.storage.databases.state.deletion import StateDeletionDataStore
logger = logging.getLogger(__name__)
metrics_logger = logging.getLogger("synapse.state.metrics")
@@ -194,6 +196,8 @@ class StateHandler:
self._storage_controllers = hs.get_storage_controllers()
self._events_shard_config = hs.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
self._state_store = hs.get_datastores().state
self._state_deletion_store = hs.get_datastores().state_deletion
self._update_current_state_client = (
ReplicationUpdateCurrentStateRestServlet.make_client(hs)
@@ -475,7 +479,10 @@ class StateHandler:
@trace
@measure_func()
async def resolve_state_groups_for_events(
self,
room_id: str,
event_ids: StrCollection,
await_full_state: bool = True,
) -> _StateCacheEntry:
"""Given a list of event_ids this method fetches the state at each
event, resolves conflicts between them and returns them.
@@ -511,6 +518,17 @@ class StateHandler:
) = await self._state_storage_controller.get_state_group_delta(
state_group_id
)
if prev_group:
# Ensure that we still have the prev group, and ensure we don't
# delete it while we're persisting the event.
missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion(
{prev_group}
)
if missing_state_group:
prev_group = None
delta_ids = None
return _StateCacheEntry(
state=None,
state_group=state_group_id,
@@ -531,7 +549,9 @@ class StateHandler:
room_version,
state_to_resolve,
None,
state_res_store=StateResolutionStore(
self.store, self._state_deletion_store
),
)
return result
@@ -663,7 +683,25 @@ class StateResolutionHandler:
async with self.resolve_linearizer.queue(group_names):
cache = self._state_cache.get(group_names, None)
if cache:
# Check that the returned cache entry doesn't point to deleted
# state groups.
state_groups_to_check = set()
if cache.state_group is not None:
state_groups_to_check.add(cache.state_group)
if cache.prev_group is not None:
state_groups_to_check.add(cache.prev_group)
missing_state_groups = await state_res_store.state_deletion_store.check_state_groups_and_bump_deletion(
state_groups_to_check
)
if not missing_state_groups:
return cache
else:
# There are missing state groups, so let's remove the stale
# entry and continue as if it was a cache miss.
self._state_cache.pop(group_names, None)
logger.info(
"Resolving state for %s with groups %s",
@@ -671,6 +709,16 @@ class StateResolutionHandler:
list(group_names),
)
# We double check that none of the state groups have been deleted.
# They shouldn't be as all these state groups should be referenced.
missing_state_groups = await state_res_store.state_deletion_store.check_state_groups_and_bump_deletion(
group_names
)
if missing_state_groups:
raise Exception(
f"State groups have been deleted: {shortstr(missing_state_groups)}"
)
state_groups_histogram.observe(len(state_groups_ids))
new_state = await self.resolve_events_with_store(
@@ -884,7 +932,8 @@ class StateResolutionStore:
in well defined way.
"""
main_store: "DataStore"
state_deletion_store: "StateDeletionDataStore"
def get_events(
self, event_ids: StrCollection, allow_rejected: bool = False
@@ -899,7 +948,7 @@ class StateResolutionStore:
An awaitable which resolves to a dict from event_id to event.
"""
return self.main_store.get_events(
event_ids,
redact_behaviour=EventRedactBehaviour.as_is,
get_prev_content=False,
@@ -920,4 +969,4 @@ class StateResolutionStore:
An awaitable that resolves to a set of event IDs.
"""
return self.main_store.get_auth_chain_difference(room_id, state_sets)

View file

@@ -86,7 +86,9 @@ class SQLBaseStore(metaclass=ABCMeta):
"""
def _invalidate_state_caches(
self,
room_id: str,
members_changed: Collection[str],
) -> None:
"""Invalidates caches that are based on the current state, but does
not stream invalidations down replication.

View file

@@ -789,7 +789,7 @@ class BackgroundUpdater:
# we may already have a half-built index. Let's just drop it
# before trying to create it again.
sql = "DROP INDEX CONCURRENTLY IF EXISTS %s" % (index_name,)
logger.debug("[SQL] %s", sql)
c.execute(sql)
@@ -814,7 +814,7 @@ class BackgroundUpdater:
if replaces_index is not None:
# We drop the old index as the new index has now been created.
sql = f"DROP INDEX CONCURRENTLY IF EXISTS {replaces_index}"
logger.debug("[SQL] %s", sql)
c.execute(sql)
finally:

View file

@@ -332,6 +332,7 @@ class EventsPersistenceStorageController:
# store for now.
self.main_store = stores.main
self.state_store = stores.state
self._state_deletion_store = stores.state_deletion
assert stores.persist_events
self.persist_events_store = stores.persist_events
@@ -549,7 +550,9 @@ class EventsPersistenceStorageController:
room_version,
state_maps_by_state_group,
event_map=None,
state_res_store=StateResolutionStore(
self.main_store, self._state_deletion_store
),
)
return await res.get_state(self._state_controller, StateFilter.all())
@@ -635,15 +638,20 @@ class EventsPersistenceStorageController:
room_id, [e for e, _ in chunk]
)
# Stop the state groups from being deleted while we're persisting
# them.
async with self._state_deletion_store.persisting_state_group_references(
events_and_contexts
):
await self.persist_events_store._persist_events_and_state_updates(
room_id,
chunk,
state_delta_for_room=state_delta_for_room,
new_forward_extremities=new_forward_extremities,
use_negative_stream_ordering=backfilled,
inhibit_local_membership_updates=backfilled,
new_event_links=new_event_links,
)
return replaced_events
@@ -965,7 +973,9 @@ class EventsPersistenceStorageController:
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(
self.main_store, self._state_deletion_store
),
)
state_resolutions_during_persistence.inc()

View file

@@ -21,9 +21,10 @@
import itertools
import logging
from typing import TYPE_CHECKING, Collection, Mapping, Set
from synapse.logging.context import nested_logging_context
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.databases import Databases
if TYPE_CHECKING:
@@ -38,6 +39,11 @@ class PurgeEventsStorageController:
def __init__(self, hs: "HomeServer", stores: Databases):
self.stores = stores
if hs.config.worker.run_background_tasks:
self._delete_state_loop_call = hs.get_clock().looping_call(
self._delete_state_groups_loop, 60 * 1000
)
async def purge_room(self, room_id: str) -> None:
"""Deletes all record of a room"""
@@ -68,11 +74,15 @@ class PurgeEventsStorageController:
logger.info("[purge] finding state groups that can be deleted")
sg_to_delete = await self._find_unreferenced_groups(state_groups)
# Mark these state groups as pending deletion, they will actually
# get deleted automatically later.
await self.stores.state_deletion.mark_state_groups_as_pending_deletion(
sg_to_delete
)
async def _find_unreferenced_groups(
self, state_groups: Collection[int]
) -> Set[int]:
"""Used when purging history to figure out which state groups can be
deleted.
@@ -118,6 +128,78 @@ class PurgeEventsStorageController:
next_to_search |= prevs
state_groups_seen |= prevs
# We also check to see if anything referencing the state groups are
# also unreferenced. This helps ensure that we delete unreferenced
# state groups, if we don't then we will de-delta them when we
# delete the other state groups leading to increased DB usage.
next_edges = await self.stores.state.get_next_state_groups(current_search)
nexts = set(next_edges.keys())
nexts -= state_groups_seen
next_to_search |= nexts
state_groups_seen |= nexts
to_delete = state_groups_seen - referenced_groups
return to_delete
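# Toy illustration (invented graph) of why the next-edges walk added above
# matters: deleting only group 2 would force group 3, stored as a delta against
# 2, to be de-delta'd; walking next-edges pulls 3 into the purge set as well.
from typing import Dict, Set

toy_prev_edges: Dict[int, int] = {2: 1, 3: 2}  # child -> parent state group
toy_next_edges: Dict[int, Set[int]] = {1: {2}, 2: {3}}
toy_referenced: Set[int] = {1}  # group 1 is still referenced by an event

def toy_unreferenced_closure(start: Set[int]) -> Set[int]:
    # Walk both prev- and next-edges from the purge set, then drop anything
    # referenced, mirroring the traversal in _find_unreferenced_groups above.
    seen = set(start)
    frontier = set(start)
    while frontier:
        step: Set[int] = set()
        for sg in frontier:
            if sg in toy_prev_edges:
                step.add(toy_prev_edges[sg])
            step |= toy_next_edges.get(sg, set())
        frontier = step - seen
        seen |= step
    return seen - toy_referenced

assert toy_unreferenced_closure({2}) == {2, 3}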
@wrap_as_background_process("_delete_state_groups_loop")
async def _delete_state_groups_loop(self) -> None:
"""Background task that deletes any state groups that may be pending
deletion."""
while True:
next_to_delete = await self.stores.state_deletion.get_next_state_group_collection_to_delete()
if next_to_delete is None:
break
(room_id, groups_to_sequences) = next_to_delete
made_progress = await self._delete_state_groups(
room_id, groups_to_sequences
)
# If no progress was made in deleting the state groups, then we
# break to allow a pause before trying again next time we get
# called.
if not made_progress:
break
async def _delete_state_groups(
self, room_id: str, groups_to_sequences: Mapping[int, int]
) -> bool:
"""Tries to delete the given state groups.
Returns:
Whether we made progress in deleting the state groups (or marking
them as referenced).
"""
# We double check if any of the state groups have become referenced.
# This shouldn't happen, as any usages should cause the state group to
# be removed as pending deletion.
referenced_state_groups = await self.stores.main.get_referenced_state_groups(
groups_to_sequences
)
if referenced_state_groups:
# We mark any state groups that have become referenced as being
# used.
await self.stores.state_deletion.mark_state_groups_as_used(
referenced_state_groups
)
# Update list of state groups to remove referenced ones
groups_to_sequences = {
state_group: sequence_number
for state_group, sequence_number in groups_to_sequences.items()
if state_group not in referenced_state_groups
}
if not groups_to_sequences:
# We made progress here as long as we marked some state groups as
# now referenced.
return len(referenced_state_groups) > 0
return await self.stores.state.purge_unreferenced_state_groups(
room_id,
groups_to_sequences,
)

View file

@@ -26,6 +26,7 @@ from synapse.storage._base import SQLBaseStore
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.databases.main.events import PersistEventsStore
from synapse.storage.databases.state import StateGroupDataStore
from synapse.storage.databases.state.deletion import StateDeletionDataStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
@@ -49,12 +50,14 @@ class Databases(Generic[DataStoreT]):
main
state
persist_events
state_deletion
"""
databases: List[DatabasePool]
main: "DataStore" # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class`
state: StateGroupDataStore
persist_events: Optional[PersistEventsStore]
state_deletion: StateDeletionDataStore
def __init__(self, main_store_class: Type[DataStoreT], hs: "HomeServer"):
# Note we pass in the main store class here as workers use a different main
@@ -63,6 +66,7 @@ class Databases(Generic[DataStoreT]):
self.databases = []
main: Optional[DataStoreT] = None
state: Optional[StateGroupDataStore] = None
state_deletion: Optional[StateDeletionDataStore] = None
persist_events: Optional[PersistEventsStore] = None
for database_config in hs.config.database.databases:
@@ -114,7 +118,8 @@ class Databases(Generic[DataStoreT]):
if state:
raise Exception("'state' data store already configured")
state_deletion = StateDeletionDataStore(database, db_conn, hs)
state = StateGroupDataStore(database, db_conn, hs, state_deletion)
db_conn.commit()
@@ -135,7 +140,7 @@ class Databases(Generic[DataStoreT]):
if not main:
raise Exception("No 'main' database configured")
if not state or not state_deletion:
raise Exception("No 'state' database configured")
# We use local variables here to ensure that the databases do not have
@@ -143,3 +148,4 @@ class Databases(Generic[DataStoreT]):
self.main = main # type: ignore[assignment]
self.state = state
self.persist_events = persist_events
self.state_deletion = state_deletion

View file

@@ -219,6 +219,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
room_id = row.keys[0]
members_changed = set(row.keys[1:])
self._invalidate_state_caches(room_id, members_changed)
self._curr_state_delta_stream_cache.entity_has_changed( # type: ignore[attr-defined]
room_id, token
)
for user_id in members_changed:
self._membership_stream_cache.entity_has_changed(user_id, token) # type: ignore[attr-defined]
elif row.cache_func == PURGE_HISTORY_CACHE_NAME:
if row.keys is None:
raise Exception(
@@ -236,6 +241,35 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
room_id = row.keys[0]
self._invalidate_caches_for_room_events(room_id)
self._invalidate_caches_for_room(room_id)
self._curr_state_delta_stream_cache.entity_has_changed( # type: ignore[attr-defined]
room_id, token
)
# Note: This code is commented out to improve cache performance.
# While uncommenting would provide complete correctness, our
# automatic forgotten room purge logic (see
# `forgotten_room_retention_period`) means this would frequently
            # clear the entire cache (effectively) and probably have a noticeable
# impact on the cache hit ratio.
#
# Not updating the cache here is safe because:
#
# 1. `_membership_stream_cache` is only used to indicate the
# *absence* of changes, i.e. "nothing has changed between tokens
# X and Y and so return early and don't query the database".
# 2. `_membership_stream_cache` is used when we query data from
# `current_state_delta_stream` and `room_memberships` but since
# nothing new is written to the database for those tables when
# purging/deleting a room (only deleting rows), there is nothing
# changed to care about.
#
# At worst, the cache might indicate a change at token X, at which
# point, we will query the database and discover nothing is there.
#
# Ideally, we would make it so that we could clear the cache on a
# more granular level but that's a bit complex and fiddly to do with
# room membership.
#
# self._membership_stream_cache.all_entities_changed(token) # type: ignore[attr-defined]
else:
self._attempt_to_invalidate_cache(row.cache_func, row.keys)
@@ -275,6 +309,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache(
"get_sliding_sync_rooms_for_user", None
)
self._membership_stream_cache.entity_has_changed(data.state_key, token) # type: ignore[attr-defined]
elif data.type == EventTypes.RoomEncryption:
self._attempt_to_invalidate_cache(
"get_room_encryption", (data.room_id,)
@@ -291,6 +326,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
# Similar to the above, but the entire caches are invalidated. This is
# unfortunate for the membership caches, but should recover quickly.
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]
self._membership_stream_cache.all_entities_changed(token) # type: ignore[attr-defined]
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,))
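# Hedged sketch (invented class, not Synapse's StreamChangeCache) of the
# "absence of changes" contract that the commented-out invalidation above
# relies on: the cache may only answer "definitely unchanged" or "maybe
# changed", so a stale entry costs one wasted database query, never wrong data.
from typing import Dict

class TinyStreamChangeCache:
    def __init__(self, earliest_known_token: int) -> None:
        self._earliest = earliest_known_token
        self._last_change: Dict[str, int] = {}

    def entity_has_changed(self, entity: str, token: int) -> None:
        self._last_change[entity] = max(token, self._last_change.get(entity, 0))

    def has_entity_changed(self, entity: str, since: int) -> bool:
        if since < self._earliest:
            # Can't prove absence of changes that far back: say "maybe".
            return True
        return self._last_change.get(entity, self._earliest) > since

cache = TinyStreamChangeCache(earliest_known_token=10)
cache.entity_has_changed("@alice:example.com", 15)
assert cache.has_entity_changed("@alice:example.com", since=12)
assert not cache.has_entity_changed("@bob:example.com", since=12)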

View file

@@ -1605,7 +1605,13 @@ class PersistEventsStore:
room_id
delta_state: Deltas that are going to be used to update the
`current_state_events` table. Changes to the current state of the room.
stream_id: This is expected to be the minimum `stream_ordering` for the
batch of events that we are persisting; which means we do not end up in a
situation where workers see events before the `current_state_delta` updates.
FIXME: However, this function also gets called with next upcoming
`stream_ordering` when we re-sync the state of a partial stated room (see
`update_current_state(...)`) which may be "correct" but it would be good to
nail down what exactly is the expected value here.
sliding_sync_table_changes: Changes to the
`sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables
derived from the given `delta_state` (see
@@ -1908,6 +1914,13 @@ class PersistEventsStore:
stream_id,
)
for user_id in members_to_cache_bust:
txn.call_after(
self.store._membership_stream_cache.entity_has_changed,
user_id,
stream_id,
)
# Invalidate the various caches
self.store._invalidate_state_caches_and_stream(
txn, room_id, members_to_cache_bust

View file

@@ -18,8 +18,13 @@
# [This file includes modifications made by New Vector Limited]
#
#
import json
from typing import TYPE_CHECKING, Dict, Optional, Tuple, cast
from canonicaljson import encode_canonical_json
from synapse.api.constants import ProfileFields
from synapse.api.errors import Codes, StoreError
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -27,13 +32,17 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict, JsonValue, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
# The number of bytes that the serialized profile can have.
MAX_PROFILE_SIZE = 65536
class ProfileWorkerStore(SQLBaseStore):
def __init__(
self,
@@ -201,6 +210,89 @@ class ProfileWorkerStore(SQLBaseStore):
desc="get_profile_avatar_url",
)
async def get_profile_field(self, user_id: UserID, field_name: str) -> JsonValue:
"""
Get a custom profile field for a user.
Args:
user_id: The user's ID.
field_name: The custom profile field name.
Returns:
            The value of the field if it exists; a 404 StoreError is raised otherwise.
"""
def get_profile_field(txn: LoggingTransaction) -> JsonValue:
# This will error if field_name has double quotes in it, but that's not
# possible due to the grammar.
field_path = f'$."{field_name}"'
if isinstance(self.database_engine, PostgresEngine):
sql = """
SELECT JSONB_PATH_EXISTS(fields, ?), JSONB_EXTRACT_PATH(fields, ?)
FROM profiles
WHERE user_id = ?
"""
txn.execute(
sql,
(field_path, field_name, user_id.localpart),
)
# Test exists first since value being None is used for both
# missing and a null JSON value.
exists, value = cast(Tuple[bool, JsonValue], txn.fetchone())
if not exists:
raise StoreError(404, "No row found")
return value
else:
sql = """
SELECT JSON_TYPE(fields, ?), JSON_EXTRACT(fields, ?)
FROM profiles
WHERE user_id = ?
"""
txn.execute(
sql,
(field_path, field_path, user_id.localpart),
)
# If value_type is None, then the value did not exist.
value_type, value = cast(
Tuple[Optional[str], JsonValue], txn.fetchone()
)
if not value_type:
raise StoreError(404, "No row found")
# If value_type is object or array, then need to deserialize the JSON.
# Scalar values are properly returned directly.
if value_type in ("object", "array"):
assert isinstance(value, str)
return json.loads(value)
return value
return await self.db_pool.runInteraction("get_profile_field", get_profile_field)
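# Standalone demonstration (plain sqlite3, outside Synapse) of why the SQLite
# branch above re-parses objects and arrays: JSON_EXTRACT returns scalars as
# native Python values but objects/arrays as serialized strings. Requires an
# SQLite build with the JSON1 functions (standard in modern releases).
import sqlite3

conn = sqlite3.connect(":memory:")
scalar = conn.execute(
    "SELECT JSON_TYPE(?, '$.a'), JSON_EXTRACT(?, '$.a')", ('{"a": 5}', '{"a": 5}')
).fetchone()
assert scalar == ("integer", 5)  # scalar arrives as a real int

obj = conn.execute(
    "SELECT JSON_TYPE(?, '$.a'), JSON_EXTRACT(?, '$.a')",
    ('{"a": {"b": 1}}', '{"a": {"b": 1}}'),
).fetchone()
assert obj == ("object", '{"b":1}')  # object arrives as a string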
async def get_profile_fields(self, user_id: UserID) -> Dict[str, str]:
"""
Get all custom profile fields for a user.
Args:
user_id: The user's ID.
Returns:
A dictionary of custom profile fields.
"""
result = await self.db_pool.simple_select_one_onecol(
table="profiles",
keyvalues={"full_user_id": user_id.to_string()},
retcol="fields",
desc="get_profile_fields",
)
# The SQLite driver doesn't automatically convert JSON to
# Python objects
if isinstance(self.database_engine, Sqlite3Engine) and result:
result = json.loads(result)
return result or {}
async def create_profile(self, user_id: UserID) -> None:
"""
Create a blank profile for a user.
@@ -215,6 +307,71 @@ class ProfileWorkerStore(SQLBaseStore):
desc="create_profile",
)
def _check_profile_size(
self,
txn: LoggingTransaction,
user_id: UserID,
new_field_name: str,
new_value: JsonValue,
) -> None:
# For each entry there are 4 quotes (2 each for key and value), 1 colon,
# and 1 comma.
PER_VALUE_EXTRA = 6
# Add the size of the current custom profile fields, ignoring the entry
# which will be overwritten.
if isinstance(txn.database_engine, PostgresEngine):
size_sql = """
SELECT
OCTET_LENGTH((fields - ?)::text), OCTET_LENGTH(displayname), OCTET_LENGTH(avatar_url)
FROM profiles
WHERE
user_id = ?
"""
txn.execute(
size_sql,
(new_field_name, user_id.localpart),
)
else:
size_sql = """
SELECT
LENGTH(json_remove(fields, ?)), LENGTH(displayname), LENGTH(avatar_url)
FROM profiles
WHERE
user_id = ?
"""
txn.execute(
size_sql,
# This will error if field_name has double quotes in it, but that's not
# possible due to the grammar.
(f'$."{new_field_name}"', user_id.localpart),
)
row = cast(Tuple[Optional[int], Optional[int], Optional[int]], txn.fetchone())
# The values return null if the column is null.
total_bytes = (
# Discount the opening and closing braces to avoid double counting,
# but add one for a comma.
# -2 + 1 = -1
(row[0] - 1 if row[0] else 0)
+ (
row[1] + len("displayname") + PER_VALUE_EXTRA
if new_field_name != ProfileFields.DISPLAYNAME and row[1]
else 0
)
+ (
row[2] + len("avatar_url") + PER_VALUE_EXTRA
if new_field_name != ProfileFields.AVATAR_URL and row[2]
else 0
)
)
# Add the length of the field being added + the braces.
total_bytes += len(encode_canonical_json({new_field_name: new_value}))
if total_bytes > MAX_PROFILE_SIZE:
raise StoreError(400, "Profile too large", Codes.PROFILE_TOO_LARGE)
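# Worked example (made-up values) of the budget computed above: a profile with
# displayname "Alice", no avatar_url and no existing custom fields, setting
# "org.example.pet" to "cat". Plain json.dumps matches canonical JSON here
# since the single key is ASCII.
import json

_PER_VALUE_EXTRA = 6  # 4 quotes + 1 colon + 1 comma, as defined above

displayname_part = len("Alice") + len("displayname") + _PER_VALUE_EXTRA  # 22
existing_fields_part = 0  # no fields stored yet, so nothing to discount
new_field_part = len(json.dumps({"org.example.pet": "cat"}, separators=(",", ":")))  # 25

assert displayname_part + existing_fields_part + new_field_part == 47  # well under 65536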
async def set_profile_displayname(
self, user_id: UserID, new_displayname: Optional[str]
) -> None:
@@ -227,14 +384,25 @@ class ProfileWorkerStore(SQLBaseStore):
name is removed.
"""
user_localpart = user_id.localpart
def set_profile_displayname(txn: LoggingTransaction) -> None:
if new_displayname is not None:
self._check_profile_size(
txn, user_id, ProfileFields.DISPLAYNAME, new_displayname
)
self.db_pool.simple_upsert_txn(
txn,
table="profiles",
keyvalues={"user_id": user_localpart},
values={
"displayname": new_displayname,
"full_user_id": user_id.to_string(),
},
)
await self.db_pool.runInteraction(
"set_profile_displayname", set_profile_displayname
)
async def set_profile_avatar_url(
@@ -249,13 +417,125 @@ class ProfileWorkerStore(SQLBaseStore):
removed.
"""
user_localpart = user_id.localpart
def set_profile_avatar_url(txn: LoggingTransaction) -> None:
if new_avatar_url is not None:
self._check_profile_size(
txn, user_id, ProfileFields.AVATAR_URL, new_avatar_url
)
self.db_pool.simple_upsert_txn(
txn,
table="profiles",
keyvalues={"user_id": user_localpart},
values={
"avatar_url": new_avatar_url,
"full_user_id": user_id.to_string(),
},
)
await self.db_pool.runInteraction(
"set_profile_avatar_url", set_profile_avatar_url
)
async def set_profile_field(
self, user_id: UserID, field_name: str, new_value: JsonValue
) -> None:
"""
Set a custom profile field for a user.
Args:
user_id: The user's ID.
field_name: The name of the custom profile field.
new_value: The value of the custom profile field.
"""
# Encode to canonical JSON.
canonical_value = encode_canonical_json(new_value)
def set_profile_field(txn: LoggingTransaction) -> None:
self._check_profile_size(txn, user_id, field_name, new_value)
if isinstance(self.database_engine, PostgresEngine):
from psycopg2.extras import Json
# Note that the || jsonb operator is not recursive, any duplicate
# keys will be taken from the second value.
sql = """
INSERT INTO profiles (user_id, full_user_id, fields) VALUES (?, ?, JSON_BUILD_OBJECT(?, ?::jsonb))
ON CONFLICT (user_id)
DO UPDATE SET full_user_id = EXCLUDED.full_user_id, fields = COALESCE(profiles.fields, '{}'::jsonb) || EXCLUDED.fields
"""
txn.execute(
sql,
(
user_id.localpart,
user_id.to_string(),
field_name,
# Pass as a JSON object since we have passing bytes disabled
# at the database driver.
Json(json.loads(canonical_value)),
),
)
else:
# You may be tempted to use json_patch instead of providing the parameters
# twice, but that recursively merges objects instead of replacing.
sql = """
INSERT INTO profiles (user_id, full_user_id, fields) VALUES (?, ?, JSON_OBJECT(?, JSON(?)))
ON CONFLICT (user_id)
DO UPDATE SET full_user_id = EXCLUDED.full_user_id, fields = JSON_SET(COALESCE(profiles.fields, '{}'), ?, JSON(?))
"""
# This will error if field_name has double quotes in it, but that's not
# possible due to the grammar.
json_field_name = f'$."{field_name}"'
txn.execute(
sql,
(
user_id.localpart,
user_id.to_string(),
json_field_name,
canonical_value,
json_field_name,
canonical_value,
),
)
await self.db_pool.runInteraction("set_profile_field", set_profile_field)
async def delete_profile_field(self, user_id: UserID, field_name: str) -> None:
"""
Remove a custom profile field for a user.
Args:
user_id: The user's ID.
field_name: The name of the custom profile field.
"""
def delete_profile_field(txn: LoggingTransaction) -> None:
if isinstance(self.database_engine, PostgresEngine):
sql = """
UPDATE profiles SET fields = fields - ?
WHERE user_id = ?
"""
txn.execute(
sql,
(field_name, user_id.localpart),
)
else:
sql = """
UPDATE profiles SET fields = json_remove(fields, ?)
WHERE user_id = ?
"""
txn.execute(
sql,
# This will error if field_name has double quotes in it.
(f'$."{field_name}"', user_id.localpart),
)
await self.db_pool.runInteraction("delete_profile_field", delete_profile_field)
class ProfileStore(ProfileWorkerStore):
pass

View file

@@ -1181,6 +1181,50 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return total_media_quarantined
async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked.
Can be called multiple times (though we'll only track the last user to
block this room).
Can be called on a room unknown to this homeserver.
Args:
room_id: Room to block
user_id: Who blocked it
"""
await self.db_pool.simple_upsert(
table="blocked_rooms",
keyvalues={"room_id": room_id},
values={},
insertion_values={"user_id": user_id},
desc="block_room",
)
await self.db_pool.runInteraction(
"block_room_invalidation",
self._invalidate_cache_and_stream,
self.is_room_blocked,
(room_id,),
)
async def unblock_room(self, room_id: str) -> None:
"""Remove the room from blocking list.
Args:
room_id: Room to unblock
"""
await self.db_pool.simple_delete(
table="blocked_rooms",
keyvalues={"room_id": room_id},
desc="unblock_room",
)
await self.db_pool.runInteraction(
"block_room_invalidation",
self._invalidate_cache_and_stream,
self.is_room_blocked,
(room_id,),
)
async def get_rooms_for_retention_period_in_range(
self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
) -> Dict[str, RetentionPolicy]:
@@ -2500,50 +2544,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
)
return next_id
async def clear_partial_state_room(self, room_id: str) -> Optional[int]:
"""Clears the partial state flag for a room.

View file

@@ -0,0 +1,511 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import contextlib
from typing import (
TYPE_CHECKING,
AbstractSet,
AsyncIterator,
Collection,
Mapping,
Optional,
Set,
Tuple,
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.engines import PostgresEngine
from synapse.util.stringutils import shortstr
if TYPE_CHECKING:
from synapse.server import HomeServer
class StateDeletionDataStore:
"""Manages deletion of state groups in a safe manner.
Deleting state groups is challenging: before we actually delete them, we
need to ensure that no in-flight events refer to the state groups that we
want to delete.
To handle this, we take two approaches. First, before we persist any event
we ensure that the state group still exists and mark in the
`state_groups_persisting` table that the state group is about to be used.
(Note that we have to have the extra table here as state groups and events
can be in different databases, and thus we can't check for the existence of
state groups in the persist event transaction). Once the event has been
persisted, we can remove the row from `state_groups_persisting`. So long as
we check that table before deleting state groups, we can ensure that we
never persist events that reference deleted state groups, maintaining
database integrity.
However, we want to avoid throwing exceptions so deep in the process of
persisting events. So instead of deleting state groups immediately, we mark
them as pending/proposed for deletion and wait for a certain amount of time
before performing the deletion. When we come to handle new events that
reference state groups, we check if they are pending deletion and bump the
time for when they'll be deleted (to give a chance for the event to be
persisted, or not).
When deleting, we need to check that state groups remain unreferenced. There
is a race here where we a) fetch state groups that are ready for deletion,
b) check they're unreferenced, c) the state group becomes referenced but
then gets marked as pending deletion again, d) during the deletion
transaction we recheck the `state_groups_pending_deletion` table, see that
the row still exists, and so continue with the deletion.
happening we add a `sequence_number` column to
`state_groups_pending_deletion`, and during deletion we ensure that for a
state group we're about to delete that the sequence number doesn't change
between steps (a) and (d). So long as we always bump the sequence number
whenever an event may become used the race can never happen.
"""
# How long to wait before we delete state groups. This should be long enough
# for any in-flight events to be persisted. If events take longer to persist
# and any of the state groups they reference have been deleted, then the
# event will fail to persist (as well as any event in the same batch).
DELAY_BEFORE_DELETION_MS = 10 * 60 * 1000
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
self._clock = hs.get_clock()
self.db_pool = database
self._instance_name = hs.get_instance_name()
# TODO: Clear from `state_groups_persisting` any holdovers from previous
# running instance.
async def check_state_groups_and_bump_deletion(
self, state_groups: AbstractSet[int]
) -> Collection[int]:
"""Checks to make sure that the state groups haven't been deleted, and
if they're pending deletion we delay it (allowing time for any event
that will use them to finish persisting).
Returns:
The state groups that are missing, if any.
"""
return await self.db_pool.runInteraction(
"check_state_groups_and_bump_deletion",
self._check_state_groups_and_bump_deletion_txn,
state_groups,
)
def _check_state_groups_and_bump_deletion_txn(
self, txn: LoggingTransaction, state_groups: AbstractSet[int]
) -> Collection[int]:
existing_state_groups = self._get_existing_groups_with_lock(txn, state_groups)
self._bump_deletion_txn(txn, existing_state_groups)
missing_state_groups = state_groups - existing_state_groups
if missing_state_groups:
return missing_state_groups
return ()
def _bump_deletion_txn(
self, txn: LoggingTransaction, state_groups: Collection[int]
) -> None:
"""Update any pending deletions of the state group that they may now be
referenced."""
if not state_groups:
return
now = self._clock.time_msec()
if isinstance(self.db_pool.engine, PostgresEngine):
clause, args = make_in_list_sql_clause(
self.db_pool.engine, "state_group", state_groups
)
sql = f"""
UPDATE state_groups_pending_deletion
SET sequence_number = DEFAULT, insertion_ts = ?
WHERE {clause}
"""
args.insert(0, now)
txn.execute(sql, args)
else:
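# SQLite can't reset the auto-increment column with `SET sequence_number =
# DEFAULT` in an UPDATE, so instead we delete the rows and re-insert them,
# which allocates fresh sequence numbers.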
rows = self.db_pool.simple_select_many_txn(
txn,
table="state_groups_pending_deletion",
column="state_group",
iterable=state_groups,
keyvalues={},
retcols=("state_group",),
)
if not rows:
return
state_groups_to_update = [state_group for (state_group,) in rows]
self.db_pool.simple_delete_many_txn(
txn,
table="state_groups_pending_deletion",
column="state_group",
values=state_groups_to_update,
keyvalues={},
)
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_pending_deletion",
keys=("state_group", "insertion_ts"),
values=[(state_group, now) for state_group in state_groups_to_update],
)
def _get_existing_groups_with_lock(
self, txn: LoggingTransaction, state_groups: Collection[int]
) -> AbstractSet[int]:
"""Return which of the given state groups are in the database, and locks
those rows with `KEY SHARE` to ensure they don't get concurrently
deleted."""
clause, args = make_in_list_sql_clause(self.db_pool.engine, "id", state_groups)
sql = f"""
SELECT id FROM state_groups
WHERE {clause}
"""
if isinstance(self.db_pool.engine, PostgresEngine):
# On postgres we add a row-level lock to the rows to ensure that we
# conflict with any concurrent DELETEs. A `FOR KEY SHARE` lock does not
# conflict with other reads, only with operations that delete the row or
# change its key.
sql += """
FOR KEY SHARE
"""
txn.execute(sql, args)
return {state_group for (state_group,) in txn}
@contextlib.asynccontextmanager
async def persisting_state_group_references(
self, event_and_contexts: Collection[Tuple[EventBase, EventContext]]
) -> AsyncIterator[None]:
"""Wraps the persistence of the given events and contexts, ensuring that
any state groups referenced still exist and that they don't get deleted
during this."""
referenced_state_groups: Set[int] = set()
for event, ctx in event_and_contexts:
if ctx.rejected or event.internal_metadata.is_outlier():
continue
assert ctx.state_group is not None
referenced_state_groups.add(ctx.state_group)
if ctx.state_group_before_event:
referenced_state_groups.add(ctx.state_group_before_event)
if not referenced_state_groups:
# We don't reference any state groups, so nothing to do
yield
return
await self.db_pool.runInteraction(
"mark_state_groups_as_persisting",
self._mark_state_groups_as_persisting_txn,
referenced_state_groups,
)
error = True
try:
yield None
error = False
finally:
await self.db_pool.runInteraction(
"finish_persisting",
self._finish_persisting_txn,
referenced_state_groups,
error=error,
)
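# A usage sketch (hypothetical persister code, not part of this module): the
# event persister wraps the actual write in this context manager so the
# referenced state groups can't be deleted out from under it:
#
#     async with store.persisting_state_group_references(events_and_contexts):
#         await do_persist_events(events_and_contexts)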
def _mark_state_groups_as_persisting_txn(
self, txn: LoggingTransaction, state_groups: Set[int]
) -> None:
"""Marks the given state groups as being persisted."""
existing_state_groups = self._get_existing_groups_with_lock(txn, state_groups)
missing_state_groups = state_groups - existing_state_groups
if missing_state_groups:
raise Exception(
f"state groups have been deleted: {shortstr(missing_state_groups)}"
)
self.db_pool.simple_insert_many_txn(
txn,
table="state_groups_persisting",
keys=("state_group", "instance_name"),
values=[(state_group, self._instance_name) for state_group in state_groups],
)
def _finish_persisting_txn(
self, txn: LoggingTransaction, state_groups: Collection[int], error: bool
) -> None:
"""Mark the state groups as having finished persistence.
If `error` is true then we assume the state groups were not persisted,
and so we do not clear them from the pending deletion table.
"""
self.db_pool.simple_delete_many_txn(
txn,
table="state_groups_persisting",
column="state_group",
values=state_groups,
keyvalues={"instance_name": self._instance_name},
)
if error:
# The state groups may or may not have been persisted, so we need to
# bump the deletion to ensure we recheck if they have become
# referenced.
self._bump_deletion_txn(txn, state_groups)
return
self.db_pool.simple_delete_many_batch_txn(
txn,
table="state_groups_pending_deletion",
keys=("state_group",),
values=[(state_group,) for state_group in state_groups],
)
async def mark_state_groups_as_pending_deletion(
self, state_groups: Collection[int]
) -> None:
"""Mark the given state groups as pending deletion"""
now = self._clock.time_msec()
await self.db_pool.simple_upsert_many(
table="state_groups_pending_deletion",
key_names=("state_group",),
key_values=[(state_group,) for state_group in state_groups],
value_names=("insertion_ts",),
value_values=[(now,) for _ in state_groups],
desc="mark_state_groups_as_pending_deletion",
)
async def mark_state_groups_as_used(self, state_groups: Collection[int]) -> None:
"""Mark the given state groups as now being referenced"""
await self.db_pool.simple_delete_many(
table="state_groups_pending_deletion",
column="state_group",
iterable=state_groups,
keyvalues={},
desc="mark_state_groups_as_used",
)
async def get_pending_deletions(
self, state_groups: Collection[int]
) -> Mapping[int, int]:
"""Get which state groups are pending deletion.
Returns:
a mapping from state groups that are pending deletion to their
sequence number
"""
rows = await self.db_pool.simple_select_many_batch(
table="state_groups_pending_deletion",
column="state_group",
iterable=state_groups,
retcols=("state_group", "sequence_number"),
keyvalues={},
desc="get_pending_deletions",
)
return dict(rows)
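# Illustrative flow (hypothetical purger code): record the sequence numbers
# first, check for references, then hand the recorded numbers back so that
# any group whose number changed in between is refused:
#
#     pending = await store.get_pending_deletions(candidate_groups)
#     # ... verify that candidate_groups are unreferenced ...
#     deletable = store.get_state_groups_ready_for_potential_deletion_txn(
#         txn, pending
#     )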
def get_state_groups_ready_for_potential_deletion_txn(
self,
txn: LoggingTransaction,
state_groups_to_sequence_numbers: Mapping[int, int],
) -> Collection[int]:
"""Given a set of state groups, return which state groups can
potentially be deleted.
The state groups must have been checked to see if they remain
unreferenced before calling this function.
Note: This must be called within the same transaction that the state
groups are deleted.
Args:
state_groups_to_sequence_numbers: The state groups, and the sequence
numbers from before the state groups were checked to see if they
were unreferenced.
Returns:
The subset of state groups that can safely be deleted
"""
if not state_groups_to_sequence_numbers:
return state_groups_to_sequence_numbers
if isinstance(self.db_pool.engine, PostgresEngine):
# On postgres we want to lock the rows FOR UPDATE as early as
# possible, so that conflicting transactions block here rather than
# part-way through the deletion.
clause, args = make_in_list_sql_clause(
self.db_pool.engine, "id", state_groups_to_sequence_numbers
)
sql = f"""
SELECT id FROM state_groups
WHERE {clause}
FOR UPDATE
"""
txn.execute(sql, args)
# Check the deletion status in the DB of the given state groups
clause, args = make_in_list_sql_clause(
self.db_pool.engine,
column="state_group",
iterable=state_groups_to_sequence_numbers,
)
sql = f"""
SELECT state_group, insertion_ts, sequence_number FROM (
SELECT state_group, insertion_ts, sequence_number FROM state_groups_pending_deletion
UNION
SELECT state_group, null, null FROM state_groups_persisting
) AS s
WHERE {clause}
"""
txn.execute(sql, args)
# The above query will return potentially two rows per state group (one
# for each table), so we track which state groups have had enough time
# elapse and which are not ready to be deleted.
ready_to_be_deleted = set()
not_ready_to_be_deleted = set()
now = self._clock.time_msec()
for state_group, insertion_ts, sequence_number in txn:
if insertion_ts is None:
# A null insertion_ts means that we are currently persisting
# events that reference the state group, so we don't delete
# them.
not_ready_to_be_deleted.add(state_group)
continue
# We know this can't be None if insertion_ts is not None
assert sequence_number is not None
# Check if the sequence number has changed, if it has then it
# indicates that the state group may have become referenced since we
# checked.
if state_groups_to_sequence_numbers[state_group] != sequence_number:
not_ready_to_be_deleted.add(state_group)
continue
if now - insertion_ts < self.DELAY_BEFORE_DELETION_MS:
# Not enough time has elapsed to allow us to delete.
not_ready_to_be_deleted.add(state_group)
continue
ready_to_be_deleted.add(state_group)
can_be_deleted = ready_to_be_deleted - not_ready_to_be_deleted
if not_ready_to_be_deleted:
# If there are any state groups that aren't ready to be deleted,
# then we also need to remove any state groups that are referenced
# by them.
clause, args = make_in_list_sql_clause(
self.db_pool.engine,
column="state_group",
iterable=state_groups_to_sequence_numbers,
)
sql = f"""
WITH RECURSIVE ancestors(state_group) AS (
SELECT DISTINCT prev_state_group
FROM state_group_edges WHERE {clause}
UNION
SELECT prev_state_group
FROM state_group_edges
INNER JOIN ancestors USING (state_group)
)
SELECT state_group FROM ancestors
"""
txn.execute(sql, args)
can_be_deleted.difference_update(state_group for (state_group,) in txn)
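# Illustrative example (hypothetical IDs): if state group 2 deltas off
# state group 1 (edge 2 -> 1) and group 2 is being kept, then group 1 is
# an ancestor of a kept group and is dropped from `can_be_deleted` here,
# since deleting it would break group 2's delta chain.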
return can_be_deleted
async def get_next_state_group_collection_to_delete(
self,
) -> Optional[Tuple[str, Mapping[int, int]]]:
"""Get the next set of state groups to try and delete
Returns:
2-tuple of room_id and mapping of state groups to sequence number.
"""
return await self.db_pool.runInteraction(
"get_next_state_group_collection_to_delete",
self._get_next_state_group_collection_to_delete_txn,
)
def _get_next_state_group_collection_to_delete_txn(
self,
txn: LoggingTransaction,
) -> Optional[Tuple[str, Mapping[int, int]]]:
"""Implementation of `get_next_state_group_collection_to_delete`"""
# We want to return chunks of state groups that were marked for deletion
# at the same time (this isn't necessary, just more efficient). We do
# this by looking for the oldest insertion_ts, and then pulling out all
# rows that have the same insertion_ts (and room ID).
now = self._clock.time_msec()
sql = """
SELECT room_id, insertion_ts
FROM state_groups_pending_deletion AS sd
INNER JOIN state_groups AS sg ON (id = sd.state_group)
LEFT JOIN state_groups_persisting AS sp USING (state_group)
WHERE insertion_ts < ? AND sp.state_group IS NULL
ORDER BY insertion_ts
LIMIT 1
"""
txn.execute(sql, (now - self.DELAY_BEFORE_DELETION_MS,))
row = txn.fetchone()
if not row:
return None
(room_id, insertion_ts) = row
sql = """
SELECT state_group, sequence_number
FROM state_groups_pending_deletion AS sd
INNER JOIN state_groups AS sg ON (id = sd.state_group)
LEFT JOIN state_groups_persisting AS sp USING (state_group)
WHERE room_id = ? AND insertion_ts = ? AND sp.state_group IS NULL
ORDER BY insertion_ts
"""
txn.execute(sql, (room_id, insertion_ts))
return room_id, dict(txn)

View file

@@ -22,10 +22,10 @@
import logging
from typing import (
TYPE_CHECKING,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
@@ -36,7 +36,10 @@ import attr
from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase
from synapse.events.snapshot import (
UnpersistedEventContext,
UnpersistedEventContextBase,
)
from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
@@ -55,6 +58,7 @@ from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.databases.state.deletion import StateDeletionDataStore
logger = logging.getLogger(__name__)
@@ -83,8 +87,10 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
state_deletion_store: "StateDeletionDataStore",
):
super().__init__(database, db_conn, hs)
self._state_deletion_store = state_deletion_store
# Originally the state store used a single DictionaryCache to cache the
# event IDs for the state types in a given state group to avoid hammering
@@ -467,14 +473,15 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
Returns:
A list of state groups
"""
is_in_db = self.db_pool.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
# We need to check that the prev group isn't about to be deleted
is_missing = (
self._state_deletion_store._check_state_groups_and_bump_deletion_txn(
txn,
{prev_group},
)
)
if not is_in_db:
if is_missing:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
@@ -546,6 +553,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
for key, state_id in context.state_delta_due_to_event.items()
],
)
return events_and_context
return await self.db_pool.runInteraction(
@@ -601,14 +609,15 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
The state group if successfully created, or None if the state
needs to be persisted as a full state.
"""
is_in_db = self.db_pool.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
# We need to check that the prev group isn't about to be deleted
is_missing = (
self._state_deletion_store._check_state_groups_and_bump_deletion_txn(
txn,
{prev_group},
)
)
if not is_in_db:
if is_missing:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
@@ -726,8 +735,10 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
)
async def purge_unreferenced_state_groups(
self, room_id: str, state_groups_to_delete: Collection[int]
) -> None:
self,
room_id: str,
state_groups_to_sequence_numbers: Mapping[int, int],
) -> bool:
"""Deletes no longer referenced state groups and de-deltas any state
groups that reference them.
@@ -735,21 +746,31 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
room_id: The room the state groups belong to (must all be in the
same room).
state_groups_to_delete: Set of all state groups to delete.
Returns:
Whether any state groups were actually deleted.
"""
await self.db_pool.runInteraction(
return await self.db_pool.runInteraction(
"purge_unreferenced_state_groups",
self._purge_unreferenced_state_groups,
room_id,
state_groups_to_delete,
state_groups_to_sequence_numbers,
)
def _purge_unreferenced_state_groups(
self,
txn: LoggingTransaction,
room_id: str,
state_groups_to_delete: Collection[int],
) -> None:
state_groups_to_sequence_numbers: Mapping[int, int],
) -> bool:
state_groups_to_delete = self._state_deletion_store.get_state_groups_ready_for_potential_deletion_txn(
txn, state_groups_to_sequence_numbers
)
if not state_groups_to_delete:
return False
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
@@ -812,6 +833,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
[(sg,) for sg in state_groups_to_delete],
)
return True
@trace
@tag_args
async def get_previous_state_groups(
@@ -830,7 +853,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
List[Tuple[int, int]],
await self.db_pool.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
column="state_group",
iterable=state_groups,
keyvalues={},
retcols=("state_group", "prev_state_group"),
@@ -840,6 +863,35 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
return dict(rows)
@trace
@tag_args
async def get_next_state_groups(
self, state_groups: Iterable[int]
) -> Dict[int, int]:
"""Fetch the groups that have the given state groups as their previous
state groups.
Args:
state_groups
Returns:
A mapping from state group to previous state group.
"""
rows = cast(
List[Tuple[int, int]],
await self.db_pool.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
iterable=state_groups,
keyvalues={},
retcols=("state_group", "prev_state_group"),
desc="get_next_state_groups",
),
)
return dict(rows)
async def purge_room_state(self, room_id: str) -> None:
return await self.db_pool.runInteraction(
"purge_room_state",

View file

@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 88 # remember to update the list below when updating
SCHEMA_VERSION = 89 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -155,6 +155,9 @@ Changes in SCHEMA_VERSION = 88
be posted in response to a resettable timeout or an on-demand action.
- Add background update to fix data integrity issue in the
`sliding_sync_membership_snapshots` -> `forgotten` column
Changes in SCHEMA_VERSION = 89
- Add `state_groups_pending_deletion` and `state_groups_persisting` tables.
"""

View file

@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 Patrick Cloke
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Custom profile fields.
ALTER TABLE profiles ADD COLUMN fields JSONB;

View file

@@ -0,0 +1,39 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- See the `StateDeletionDataStore` for details of these tables.
-- We add state groups to this table when we want to later delete them. The
-- `insertion_ts` column indicates when the state group was proposed for
-- deletion (rather than when it should be deleted).
CREATE TABLE IF NOT EXISTS state_groups_pending_deletion (
sequence_number $%AUTO_INCREMENT_PRIMARY_KEY%$,
state_group BIGINT NOT NULL,
insertion_ts BIGINT NOT NULL
);
CREATE UNIQUE INDEX state_groups_pending_deletion_state_group ON state_groups_pending_deletion(state_group);
CREATE INDEX state_groups_pending_deletion_insertion_ts ON state_groups_pending_deletion(insertion_ts);
-- Holds the state groups the worker is currently persisting.
--
-- The `sequence_number` column of the `state_groups_pending_deletion` table
-- *must* be updated whenever a state group may have become referenced.
CREATE TABLE IF NOT EXISTS state_groups_persisting (
state_group BIGINT NOT NULL,
instance_name TEXT NOT NULL,
PRIMARY KEY (state_group, instance_name)
);
CREATE INDEX state_groups_persisting_instance_name ON state_groups_persisting(instance_name);
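-- Illustrative example (not part of the schema): on Postgres, bumping a
-- pending deletion resets the sequence number, which invalidates any deletion
-- that was planned against the old number:
--
--   UPDATE state_groups_pending_deletion
--      SET sequence_number = DEFAULT, insertion_ts = ?
--    WHERE state_group = ?;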

View file

@@ -314,6 +314,15 @@ class StreamChangeCache:
self._entity_to_key[entity] = stream_pos
self._evict()
def all_entities_changed(self, stream_pos: int) -> None:
"""
Mark all entities as changed. This is useful when the cache has been
invalidated and every entity may potentially have changed.
"""
self._cache.clear()
self._entity_to_key.clear()
self._earliest_known_stream_pos = stream_pos
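# Illustrative use (hypothetical token value): after
#
#     cache.all_entities_changed(stream_pos=token)
#
# the cache can no longer vouch for any entity below `token`, so queries
# about earlier positions conservatively report a change.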
def _evict(self) -> None:
"""
Ensure the cache has not exceeded the maximum size.

View file

@@ -43,6 +43,14 @@ CLIENT_SECRET_REGEX = re.compile(r"^[0-9a-zA-Z\.=_\-]+$")
#
MXC_REGEX = re.compile("^mxc://([^/]+)/([^/#?]+)$")
# https://spec.matrix.org/v1.13/appendices/#common-namespaced-identifier-grammar
#
# At least one character and at most 255 characters. Must start with
# a-z; the remaining characters may be a-z, 0-9, -, _, or a literal dot.
#
# This doesn't check anything about validity of namespaces.
NAMESPACED_GRAMMAR = re.compile(r"^[a-z][a-z0-9_.-]{0,254}$")
def random_string(length: int) -> str:
"""Generate a cryptographically secure string of random letters.
@@ -68,6 +76,10 @@ def is_ascii(s: bytes) -> bool:
return True
def is_namedspaced_grammar(s: str) -> bool:
return bool(NAMESPACED_GRAMMAR.match(s))
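# Illustrative checks (not part of the module):
#
#     is_namedspaced_grammar("m.room.message")          # True
#     is_namedspaced_grammar("org.example.my_field-1")  # True
#     is_namedspaced_grammar("Bad.Name")                # False: must start a-z
#     is_namedspaced_grammar("")                        # False: needs >= 1 char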
def assert_valid_client_secret(client_secret: str) -> None:
"""Validate that a given string matches the client_secret defined by the spec"""
if (

View file

@@ -39,7 +39,7 @@ from synapse.module_api import ModuleApi
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import StreamToken, create_requester
from synapse.types import StreamToken, UserID, UserInfo, create_requester
from synapse.util import Clock
from tests.handlers.test_sync import generate_sync_config
@@ -349,6 +349,169 @@ class AutoAcceptInvitesTestCase(FederatingHomeserverTestCase):
join_updates, _ = sync_join(self, invited_user_id)
self.assertEqual(len(join_updates), 0)
@override_config(
{
"auto_accept_invites": {
"enabled": True,
},
}
)
async def test_ignore_invite_for_missing_user(self) -> None:
"""Tests that receiving an invite for a missing user is ignored."""
inviting_user_id = self.register_user("inviter", "pass")
inviting_user_tok = self.login("inviter", "pass")
# A local user who receives an invite
invited_user_id = "@fake:" + self.hs.config.server.server_name
# Create a room and send an invite to the other user
room_id = self.helper.create_room_as(
inviting_user_id,
tok=inviting_user_tok,
)
self.helper.invite(
room_id,
inviting_user_id,
invited_user_id,
tok=inviting_user_tok,
)
join_updates, _ = sync_join(self, inviting_user_id)
# Assert that the last event in the room is still the invite, i.e. the invite was not auto-accepted.
self.assertEqual(
join_updates[0].timeline.events[-1].content["membership"], "invite"
)
@override_config(
{
"auto_accept_invites": {
"enabled": True,
},
}
)
async def test_ignore_invite_for_deactivated_user(self) -> None:
"""Tests that receiving an invite for a deactivated user is ignored."""
inviting_user_id = self.register_user("inviter", "pass", admin=True)
inviting_user_tok = self.login("inviter", "pass")
# A local user who receives an invite
invited_user_id = self.register_user("invitee", "pass")
# Create a room and send an invite to the other user
room_id = self.helper.create_room_as(
inviting_user_id,
tok=inviting_user_tok,
)
channel = self.make_request(
"PUT",
"/_synapse/admin/v2/users/%s" % invited_user_id,
{"deactivated": True},
access_token=inviting_user_tok,
)
assert channel.code == 200
self.helper.invite(
room_id,
inviting_user_id,
invited_user_id,
tok=inviting_user_tok,
)
join_updates, _ = sync_join(self, inviting_user_id)
# Assert that the last event in the room is still the invite, i.e. the invite was not auto-accepted.
self.assertEqual(
join_updates[0].timeline.events[-1].content["membership"], "invite"
)
@override_config(
{
"auto_accept_invites": {
"enabled": True,
},
}
)
async def test_ignore_invite_for_suspended_user(self) -> None:
"""Tests that receiving an invite for a suspended user is ignored."""
inviting_user_id = self.register_user("inviter", "pass", admin=True)
inviting_user_tok = self.login("inviter", "pass")
# A local user who receives an invite
invited_user_id = self.register_user("invitee", "pass")
# Create a room and send an invite to the other user
room_id = self.helper.create_room_as(
inviting_user_id,
tok=inviting_user_tok,
)
channel = self.make_request(
"PUT",
f"/_synapse/admin/v1/suspend/{invited_user_id}",
{"suspend": True},
access_token=inviting_user_tok,
)
assert channel.code == 200
self.helper.invite(
room_id,
inviting_user_id,
invited_user_id,
tok=inviting_user_tok,
)
join_updates, _ = sync_join(self, inviting_user_id)
# Assert that the last event in the room is still the invite, i.e. the invite was not auto-accepted.
self.assertEqual(
join_updates[0].timeline.events[-1].content["membership"], "invite"
)
@override_config(
{
"auto_accept_invites": {
"enabled": True,
},
}
)
async def test_ignore_invite_for_locked_user(self) -> None:
"""Tests that receiving an invite for a suspended user is ignored."""
inviting_user_id = self.register_user("inviter", "pass", admin=True)
inviting_user_tok = self.login("inviter", "pass")
# A local user who receives an invite
invited_user_id = self.register_user("invitee", "pass")
# Create a room and send an invite to the other user
room_id = self.helper.create_room_as(
inviting_user_id,
tok=inviting_user_tok,
)
channel = self.make_request(
"PUT",
f"/_synapse/admin/v2/users/{invited_user_id}",
{"locked": True},
access_token=inviting_user_tok,
)
assert channel.code == 200
self.helper.invite(
room_id,
inviting_user_id,
invited_user_id,
tok=inviting_user_tok,
)
join_updates, _ = sync_join(self, inviting_user_id)
# Assert that the last event in the room is still the invite, i.e. the invite was not auto-accepted.
self.assertEqual(
join_updates[0].timeline.events[-1].content["membership"], "invite"
)
_request_key = 0
@@ -647,6 +810,22 @@ def create_module(
module_api.is_mine.side_effect = lambda a: a.split(":")[1] == "test"
module_api.worker_name = worker_name
module_api.sleep.return_value = make_multiple_awaitable(None)
module_api.get_userinfo_by_id.return_value = UserInfo(
user_id=UserID.from_string("@user:test"),
is_admin=False,
is_guest=False,
consent_server_notice_sent=None,
consent_ts=None,
consent_version=None,
appservice_id=None,
creation_ts=0,
user_type=None,
is_deactivated=False,
locked=False,
is_shadow_banned=False,
approved=True,
suspended=False,
)
if config_override is None:
config_override = {}

View file

@@ -0,0 +1,161 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
import logging
from unittest.mock import AsyncMock, Mock
from twisted.test.proto_helpers import MemoryReactor
from synapse.handlers.device import DeviceListUpdater
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
from synapse.util.retryutils import NotRetryingDestination
from tests import unittest
logger = logging.getLogger(__name__)
class DeviceListResyncTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = self.hs.get_datastores().main
def test_retry_device_list_resync(self) -> None:
"""Tests that device lists are marked as stale if they couldn't be synced, and
that stale device lists are retried periodically.
"""
remote_user_id = "@john:test_remote"
remote_origin = "test_remote"
# Track the number of attempts to resync the user's device list.
self.resync_attempts = 0
# When this function is called, increment the number of resync attempts (only if
# we're querying devices for the right user ID), then raise a
# NotRetryingDestination error to fail the resync gracefully.
def query_user_devices(
destination: str, user_id: str, timeout: int = 30000
) -> JsonDict:
if user_id == remote_user_id:
self.resync_attempts += 1
raise NotRetryingDestination(0, 0, destination)
# Register the mock on the federation client.
federation_client = self.hs.get_federation_client()
federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[method-assign]
# Register a mock on the store so that the incoming update doesn't fail because
# we don't share a room with the user.
self.store.get_rooms_for_user = AsyncMock(return_value=["!someroom:test"])
# Manually inject a fake device list update. We need this update to include at
# least one prev_id so that the user's device list will need to be retried.
device_list_updater = self.hs.get_device_handler().device_list_updater
assert isinstance(device_list_updater, DeviceListUpdater)
self.get_success(
device_list_updater.incoming_device_list_update(
origin=remote_origin,
edu_content={
"deleted": False,
"device_display_name": "Mobile",
"device_id": "QBUAZIFURK",
"prev_id": [5],
"stream_id": 6,
"user_id": remote_user_id,
},
)
)
# Check that there was one resync attempt.
self.assertEqual(self.resync_attempts, 1)
# Check that the resync attempt failed and caused the user's device list to be
# marked as stale.
need_resync = self.get_success(
self.store.get_user_ids_requiring_device_list_resync()
)
self.assertIn(remote_user_id, need_resync)
# Check that waiting for 30 seconds caused Synapse to retry resyncing the device
# list.
self.reactor.advance(30)
self.assertEqual(self.resync_attempts, 2)
def test_cross_signing_keys_retry(self) -> None:
"""Tests that resyncing a device list correctly processes cross-signing keys from
the remote server.
"""
remote_user_id = "@john:test_remote"
remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"
remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"
# Register mock device list retrieval on the federation client.
federation_client = self.hs.get_federation_client()
federation_client.query_user_devices = AsyncMock( # type: ignore[method-assign]
return_value={
"user_id": remote_user_id,
"stream_id": 1,
"devices": [],
"master_key": {
"user_id": remote_user_id,
"usage": ["master"],
"keys": {"ed25519:" + remote_master_key: remote_master_key},
},
"self_signing_key": {
"user_id": remote_user_id,
"usage": ["self_signing"],
"keys": {
"ed25519:" + remote_self_signing_key: remote_self_signing_key
},
},
}
)
# Resync the device list.
device_handler = self.hs.get_device_handler()
self.get_success(
device_handler.device_list_updater.multi_user_device_resync(
[remote_user_id]
),
)
# Retrieve the cross-signing keys for this user.
keys = self.get_success(
self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]),
)
self.assertIn(remote_user_id, keys)
key = keys[remote_user_id]
assert key is not None
# Check that the master key is the one returned by the mock.
master_key = key["master"]
self.assertEqual(len(master_key["keys"]), 1)
self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys())
self.assertTrue(remote_master_key in master_key["keys"].values())
# Check that the self-signing key is the one returned by the mock.
self_signing_key = key["self_signing"]
self.assertEqual(len(self_signing_key["keys"]), 1)
self.assertTrue(
"ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(),
)
self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values())

View file

@@ -0,0 +1,671 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
import logging
import time
import urllib.parse
from http import HTTPStatus
from typing import Any, Callable, Optional, Set, Tuple, TypeVar, Union
from unittest.mock import Mock
import attr
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import strip_event
from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.matrixfederationclient import (
ByteParser,
)
from synapse.http.types import QueryParams
from synapse.rest import admin
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, MutableStateMap, StateMap
from synapse.types.handlers.sliding_sync import (
StateValues,
)
from synapse.util import Clock
from tests import unittest
from tests.utils import test_timeout
logger = logging.getLogger(__name__)
def required_state_json_to_state_map(required_state: Any) -> StateMap[EventBase]:
state_map: MutableStateMap[EventBase] = {}
# Scrutinize JSON values to ensure it's in the expected format
if isinstance(required_state, list):
for state_event_dict in required_state:
# Yell because we're in a test and this is unexpected
assert isinstance(
state_event_dict, dict
), "`required_state` should be a list of event dicts"
event_type = state_event_dict["type"]
event_state_key = state_event_dict["state_key"]
# Yell because we're in a test and this is unexpected
assert isinstance(
event_type, str
), "Each event in `required_state` should have a string `type`"
assert isinstance(
event_state_key, str
), "Each event in `required_state` should have a string `state_key`"
state_map[(event_type, event_state_key)] = make_event_from_dict(
state_event_dict
)
else:
# Yell because we're in a test and this is unexpected
raise AssertionError("`required_state` should be a list of event dicts")
return state_map
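# For illustration (hypothetical input dict): given
#
#     [{"type": "m.room.member", "state_key": "@alice:test", ...}]
#
# this returns a StateMap keyed by ("m.room.member", "@alice:test"), with
# each value rebuilt as an EventBase via make_event_from_dict.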
@attr.s(slots=True, auto_attribs=True)
class RemoteRoomJoinResult:
remote_room_id: str
room_version: RoomVersion
remote_room_creator_user_id: str
local_user1_id: str
local_user1_tok: str
state_map: StateMap[EventBase]
class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase):
"""
Tests to make sure that interactions with out-of-band membership (outliers) work as
expected.
- invites received over federation, before we join the room
- *rejections* for said invites
See the "Out-of-band membership events" section in
`docs/development/room-dag-concepts.md` for more information.
"""
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
sync.register_servlets,
]
sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync"
def default_config(self) -> JsonDict:
conf = super().default_config()
# Federation sending is disabled by default in the test environment
# so we need to enable it like this.
conf["federation_sender_instances"] = ["master"]
return conf
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.federation_http_client = Mock(
# The problem with using `spec=MatrixFederationHttpClient` here is that it
# requires everything to be mocked which is a lot of work that I don't want
# to do when the code only uses a few methods (`get_json` and `put_json`).
)
return self.setup_test_homeserver(
federation_http_client=self.federation_http_client
)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
super().prepare(reactor, clock, hs)
self.store = self.hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
def do_sync(
self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str
) -> Tuple[JsonDict, str]:
"""Do a sliding sync request with given body.
Asserts the request was successful.
Args:
sync_body: The full request body to use
since: Optional since token
tok: Access token to use
Returns:
A tuple of the response body and the `pos` field.
"""
sync_path = self.sync_endpoint
if since:
sync_path += f"?pos={since}"
channel = self.make_request(
method="POST",
path=sync_path,
content=sync_body,
access_token=tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
return channel.json_body, channel.json_body["pos"]
def _invite_local_user_to_remote_room_and_join(self) -> RemoteRoomJoinResult:
"""
Helper to reproduce this scenario:
1. The remote user invites our local user to a room on their remote server (which
creates an out-of-band invite membership for user1 on our local server).
2. The local user notices the invite from `/sync`.
3. The local user joins the room.
4. The local user can see that they are now joined to the room from `/sync`.
"""
# Create a local user
local_user1_id = self.register_user("user1", "pass")
local_user1_tok = self.login(local_user1_id, "pass")
# Create a remote room
room_creator_user_id = f"@remote-user:{self.OTHER_SERVER_NAME}"
remote_room_id = f"!remote-room:{self.OTHER_SERVER_NAME}"
room_version = RoomVersions.V10
room_create_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": remote_room_id,
"sender": room_creator_user_id,
"depth": 1,
"origin_server_ts": 1,
"type": EventTypes.Create,
"state_key": "",
"content": {
# The `ROOM_CREATOR` field could be removed if we used a room
# version > 10 (in favor of relying on `sender`)
EventContentFields.ROOM_CREATOR: room_creator_user_id,
EventContentFields.ROOM_VERSION: room_version.identifier,
},
"auth_events": [],
"prev_events": [],
}
),
room_version=room_version,
)
creator_membership_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": remote_room_id,
"sender": room_creator_user_id,
"depth": 2,
"origin_server_ts": 2,
"type": EventTypes.Member,
"state_key": room_creator_user_id,
"content": {"membership": Membership.JOIN},
"auth_events": [room_create_event.event_id],
"prev_events": [room_create_event.event_id],
}
),
room_version=room_version,
)
# From the remote homeserver, invite user1 on the local homeserver
user1_invite_membership_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": remote_room_id,
"sender": room_creator_user_id,
"depth": 3,
"origin_server_ts": 3,
"type": EventTypes.Member,
"state_key": local_user1_id,
"content": {"membership": Membership.INVITE},
"auth_events": [
room_create_event.event_id,
creator_membership_event.event_id,
],
"prev_events": [creator_membership_event.event_id],
}
),
room_version=room_version,
)
channel = self.make_signed_federation_request(
"PUT",
f"/_matrix/federation/v2/invite/{remote_room_id}/{user1_invite_membership_event.event_id}",
content={
"event": user1_invite_membership_event.get_dict(),
"invite_room_state": [
strip_event(room_create_event),
],
"room_version": room_version.identifier,
},
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [(EventTypes.Member, StateValues.WILDCARD)],
"timeline_limit": 0,
}
}
}
# Sync until the local user1 can see the invite
with test_timeout(
3,
"Unable to find user1's invite event in the room",
):
while True:
response_body, _ = self.do_sync(sync_body, tok=local_user1_tok)
if (
remote_room_id in response_body["rooms"].keys()
# If they have `invite_state` for the room, they are invited
and len(
response_body["rooms"][remote_room_id].get("invite_state", [])
)
> 0
):
break
# Prevent tight-looping to allow the `test_timeout` to work
time.sleep(0.1)
user1_join_membership_event_template = make_event_from_dict(
{
"room_id": remote_room_id,
"sender": local_user1_id,
"depth": 4,
"origin_server_ts": 4,
"type": EventTypes.Member,
"state_key": local_user1_id,
"content": {"membership": Membership.JOIN},
"auth_events": [
room_create_event.event_id,
user1_invite_membership_event.event_id,
],
"prev_events": [user1_invite_membership_event.event_id],
},
room_version=room_version,
)
T = TypeVar("T")
# Mock the remote homeserver responding to our HTTP requests
#
# We're going to mock the following endpoints so that user1 can join the remote room:
# - GET /_matrix/federation/v1/make_join/{room_id}/{user_id}
# - PUT /_matrix/federation/v2/send_join/{room_id}/{user_id}
#
async def get_json(
destination: str,
path: str,
args: Optional[QueryParams] = None,
retry_on_dns_fail: bool = True,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser[T]] = None,
) -> Union[JsonDict, T]:
if (
path
== f"/_matrix/federation/v1/make_join/{urllib.parse.quote_plus(remote_room_id)}/{urllib.parse.quote_plus(local_user1_id)}"
):
return {
"event": user1_join_membership_event_template.get_pdu_json(),
"room_version": room_version.identifier,
}
raise NotImplementedError(
"We have not mocked a response for `get_json(...)` for the following endpoint yet: "
+ f"{destination}{path}"
)
self.federation_http_client.get_json.side_effect = get_json
# PDUs that hs1 sent to hs2
collected_pdus_from_hs1_federation_send: Set[str] = set()
async def put_json(
destination: str,
path: str,
args: Optional[QueryParams] = None,
data: Optional[JsonDict] = None,
json_data_callback: Optional[Callable[[], JsonDict]] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser[T]] = None,
backoff_on_all_error_codes: bool = False,
) -> Union[JsonDict, T, SendJoinResponse]:
if (
path.startswith(
f"/_matrix/federation/v2/send_join/{urllib.parse.quote_plus(remote_room_id)}/"
)
and data is not None
and data.get("type") == EventTypes.Member
and data.get("state_key") == local_user1_id
# We're assuming this is a `ByteParser[SendJoinResponse]`
and parser is not None
):
# As the remote server, we need to sign the event before sending it back
user1_join_membership_event_signed = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(data),
room_version=room_version,
)
# Since they passed in a `parser`, we need to return the type that
# they're expecting instead of just a `JsonDict`
return SendJoinResponse(
auth_events=[
room_create_event,
user1_invite_membership_event,
],
state=[
room_create_event,
creator_membership_event,
user1_invite_membership_event,
],
event_dict=user1_join_membership_event_signed.get_pdu_json(),
event=user1_join_membership_event_signed,
members_omitted=False,
servers_in_room=[
self.OTHER_SERVER_NAME,
],
)
if path.startswith("/_matrix/federation/v1/send/") and data is not None:
for pdu in data.get("pdus", []):
event = event_from_pdu_json(pdu, room_version)
collected_pdus_from_hs1_federation_send.add(event.event_id)
# Just acknowledge everything hs1 is trying to send hs2
return {
event_from_pdu_json(pdu, room_version).event_id: {}
for pdu in data.get("pdus", [])
}
raise NotImplementedError(
"We have not mocked a response for `put_json(...)` for the following endpoint yet: "
+ f"{destination}{path} with the following body data: {data}"
)
self.federation_http_client.put_json.side_effect = put_json
# User1 joins the room
self.helper.join(remote_room_id, local_user1_id, tok=local_user1_tok)
# Reset the mocks now that user1 has joined the room
self.federation_http_client.get_json.side_effect = None
self.federation_http_client.put_json.side_effect = None
# Sync until the local user1 can see that they are now joined to the room
with test_timeout(
3,
"Unable to find user1's join event in the room",
):
while True:
response_body, _ = self.do_sync(sync_body, tok=local_user1_tok)
if remote_room_id in response_body["rooms"].keys():
required_state_map = required_state_json_to_state_map(
response_body["rooms"][remote_room_id]["required_state"]
)
if (
required_state_map.get((EventTypes.Member, local_user1_id))
is not None
):
break
# Prevent tight-looping to allow the `test_timeout` to work
time.sleep(0.1)
# Nothing needs to be sent from hs1 to hs2 since we already let the other
# homeserver know by doing the `/make_join` and `/send_join` dance.
self.assertIncludes(
collected_pdus_from_hs1_federation_send,
set(),
exact=True,
message="Didn't expect any events to be sent from hs1 over federation to hs2",
)
return RemoteRoomJoinResult(
remote_room_id=remote_room_id,
room_version=room_version,
remote_room_creator_user_id=room_creator_user_id,
local_user1_id=local_user1_id,
local_user1_tok=local_user1_tok,
state_map=self.get_success(
self.storage_controllers.state.get_current_state(remote_room_id)
),
)
def test_can_join_from_out_of_band_invite(self) -> None:
"""
Test to make sure that we can join a room that we were invited to over
federation; even if our server has never participated in the room before.
"""
self._invite_local_user_to_remote_room_and_join()
@parameterized.expand(
[("accept invite", Membership.JOIN), ("reject invite", Membership.LEAVE)]
)
def test_can_x_from_out_of_band_invite_after_we_are_already_participating_in_the_room(
self, _test_description: str, membership_action: str
) -> None:
"""
Test to make sure that we can do either a) join the room (accept the invite) or
b) reject the invite after being invited to over federation; even if we are
already participating in the room.
This is a regression test to make sure we stress the scenario where even though
we are already participating in the room, local users can still react to invites
regardless of whether the remote server has told us about the invite event (via
a federation `/send` transaction) and we have de-outliered the invite event.
Previously, we would mistakenly throw an error saying the user wasn't in the
room when they tried to join or reject the invite.
"""
remote_room_join_result = self._invite_local_user_to_remote_room_and_join()
remote_room_id = remote_room_join_result.remote_room_id
room_version = remote_room_join_result.room_version
# Create another local user
local_user2_id = self.register_user("user2", "pass")
local_user2_tok = self.login(local_user2_id, "pass")
T = TypeVar("T")
# PDUs that hs1 sent to hs2
collected_pdus_from_hs1_federation_send: Set[str] = set()
async def put_json(
destination: str,
path: str,
args: Optional[QueryParams] = None,
data: Optional[JsonDict] = None,
json_data_callback: Optional[Callable[[], JsonDict]] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser[T]] = None,
backoff_on_all_error_codes: bool = False,
) -> Union[JsonDict, T]:
if path.startswith("/_matrix/federation/v1/send/") and data is not None:
for pdu in data.get("pdus", []):
event = event_from_pdu_json(pdu, room_version)
collected_pdus_from_hs1_federation_send.add(event.event_id)
# Just acknowledge everything hs1 is trying to send hs2
return {
event_from_pdu_json(pdu, room_version).event_id: {}
for pdu in data.get("pdus", [])
}
raise NotImplementedError(
"We have not mocked a response for `put_json(...)` for the following endpoint yet: "
+ f"{destination}{path} with the following body data: {data}"
)
self.federation_http_client.put_json.side_effect = put_json
# From the remote homeserver, invite user2 on the local homeserver
user2_invite_membership_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": remote_room_id,
"sender": remote_room_join_result.remote_room_creator_user_id,
"depth": 5,
"origin_server_ts": 5,
"type": EventTypes.Member,
"state_key": local_user2_id,
"content": {"membership": Membership.INVITE},
"auth_events": [
remote_room_join_result.state_map[
(EventTypes.Create, "")
].event_id,
remote_room_join_result.state_map[
(
EventTypes.Member,
remote_room_join_result.remote_room_creator_user_id,
)
].event_id,
],
"prev_events": [
remote_room_join_result.state_map[
(EventTypes.Member, remote_room_join_result.local_user1_id)
].event_id
],
}
),
room_version=room_version,
)
channel = self.make_signed_federation_request(
"PUT",
f"/_matrix/federation/v2/invite/{remote_room_id}/{user2_invite_membership_event.event_id}",
content={
"event": user2_invite_membership_event.get_dict(),
"invite_room_state": [
strip_event(
remote_room_join_result.state_map[(EventTypes.Create, "")]
),
],
"room_version": room_version.identifier,
},
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [(EventTypes.Member, StateValues.WILDCARD)],
"timeline_limit": 0,
}
}
}
# Sync until the local user2 can see the invite
with test_timeout(
3,
"Unable to find user2's invite event in the room",
):
while True:
response_body, _ = self.do_sync(sync_body, tok=local_user2_tok)
if (
remote_room_id in response_body["rooms"].keys()
# If they have `invite_state` for the room, they are invited
and len(
response_body["rooms"][remote_room_id].get("invite_state", [])
)
> 0
):
break
# Prevent tight-looping to allow the `test_timeout` to work
time.sleep(0.1)
if membership_action == Membership.JOIN:
# User2 joins the room
join_event = self.helper.join(
remote_room_join_result.remote_room_id,
local_user2_id,
tok=local_user2_tok,
)
expected_pdu_event_id = join_event["event_id"]
elif membership_action == Membership.LEAVE:
# User2 rejects the invite
leave_event = self.helper.leave(
remote_room_join_result.remote_room_id,
local_user2_id,
tok=local_user2_tok,
)
expected_pdu_event_id = leave_event["event_id"]
else:
raise NotImplementedError(
"This test does not support this membership action yet"
)
# Sync until the local user2 can see their new membership in the room
with test_timeout(
3,
"Unable to find user2's new membership event in the room",
):
while True:
response_body, _ = self.do_sync(sync_body, tok=local_user2_tok)
if membership_action == Membership.JOIN:
if remote_room_id in response_body["rooms"].keys():
required_state_map = required_state_json_to_state_map(
response_body["rooms"][remote_room_id]["required_state"]
)
if (
required_state_map.get((EventTypes.Member, local_user2_id))
is not None
):
break
elif membership_action == Membership.LEAVE:
if remote_room_id not in response_body["rooms"].keys():
break
else:
raise NotImplementedError(
"This test does not support this membership action yet"
)
# Prevent tight-looping to allow the `test_timeout` to work
time.sleep(0.1)
# Make sure that we let hs2 know about the new membership event
self.assertIncludes(
collected_pdus_from_hs1_federation_send,
{expected_pdu_event_id},
exact=True,
message="Expected to find the event ID of the user2 membership to be sent from hs1 over federation to hs2",
)

View file

@@ -20,14 +20,21 @@
#
import logging
from http import HTTPStatus
from typing import Optional, Union
from unittest.mock import Mock
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import FederationError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.federation_base import event_from_pdu_json
from synapse.http.types import QueryParams
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
@@ -85,6 +92,163 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(500, channel.code, channel.result)
def _create_acl_event(content: JsonDict) -> EventBase:
return make_event_from_dict(
{
"room_id": "!a:b",
"event_id": "$a:b",
"type": "m.room.server_acls",
"sender": "@a:b",
"content": content,
}
)
class MessageAcceptTests(unittest.FederatingHomeserverTestCase):
"""
Tests to make sure that we don't accept flawed events from federation (incoming).
"""
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.http_client = Mock()
return self.setup_test_homeserver(federation_http_client=self.http_client)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
super().prepare(reactor, clock, hs)
self.store = self.hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
self.federation_event_handler = self.hs.get_federation_event_handler()
# Create a local room
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
self.room_id = self.helper.create_room_as(
user1_id, tok=user1_tok, is_public=True
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(self.room_id)
)
# Figure out what the forward extremities in the room are (the most recent
# events that aren't tied into the DAG)
forward_extremity_event_ids = self.get_success(
self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
)
# Join a remote user to the room that will attempt to send bad events
self.remote_bad_user_id = f"@baduser:{self.OTHER_SERVER_NAME}"
self.remote_bad_user_join_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": self.room_id,
"sender": self.remote_bad_user_id,
"state_key": self.remote_bad_user_id,
"depth": 1000,
"origin_server_ts": 1,
"type": EventTypes.Member,
"content": {"membership": Membership.JOIN},
"auth_events": [
state_map[(EventTypes.Create, "")].event_id,
state_map[(EventTypes.JoinRules, "")].event_id,
],
"prev_events": list(forward_extremity_event_ids),
}
),
room_version=RoomVersions.V10,
)
# Send the join, it should return None (which is not an error)
self.assertEqual(
self.get_success(
self.federation_event_handler.on_receive_pdu(
self.OTHER_SERVER_NAME, self.remote_bad_user_join_event
)
),
None,
)
# Make sure we actually joined the room
self.assertEqual(
self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)),
{self.remote_bad_user_join_event.event_id},
)
def test_cant_hide_direct_ancestors(self) -> None:
"""
If you send a message, you must be able to provide the direct
prev_events that said event references.
"""
async def post_json(
destination: str,
path: str,
data: Optional[JsonDict] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryParams] = None,
) -> Union[JsonDict, list]:
# If it asks us for new missing events, give them NOTHING
if path.startswith("/_matrix/federation/v1/get_missing_events/"):
return {"events": []}
return {}
self.http_client.post_json = post_json
# Figure out what the forward extremities in the room are (the most recent
# events that aren't tied into the DAG)
forward_extremity_event_ids = self.get_success(
self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
)
# Now lie about an event's prev_events
lying_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"room_id": self.room_id,
"sender": self.remote_bad_user_id,
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.message",
"content": {"body": "hewwo?"},
"auth_events": [],
"prev_events": ["$missing_prev_event"]
+ list(forward_extremity_event_ids),
}
),
room_version=RoomVersions.V10,
)
with LoggingContext("test-context"):
failure = self.get_failure(
self.federation_event_handler.on_receive_pdu(
self.OTHER_SERVER_NAME, lying_event
),
FederationError,
)
# on_receive_pdu should throw an error
self.assertEqual(
failure.value.args[0],
(
"ERROR 403: Your server isn't divulging details about prev_events "
"referenced in this event."
),
)
# Make sure the invalid event isn't there
extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))
self.assertEqual(extrem, {self.remote_bad_user_join_event.event_id})
class ServerACLsTestCase(unittest.TestCase):
def test_blocked_server(self) -> None:
e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
@ -355,13 +519,76 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
# is probably sufficient to reassure that the bucket is updated.
def _create_acl_event(content: JsonDict) -> EventBase:
return make_event_from_dict(
{
"room_id": "!a:b",
"event_id": "$a:b",
"type": "m.room.server_acls",
"sender": "@a:b",
"content": content,
class StripUnsignedFromEventsTestCase(unittest.TestCase):
"""
Test to make sure that we handle the raw JSON events from federation carefully and
strip anything that shouldn't be there.
"""
def test_strip_unauthorized_unsigned_values(self) -> None:
event1 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event1:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"content": {"membership": "join"},
"auth_events": [],
"unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"},
}
)
filtered_event = event_from_pdu_json(event1, RoomVersions.V1)
# Make sure unauthorized fields are stripped from unsigned
self.assertNotIn("more warez", filtered_event.unsigned)
def test_strip_event_maintains_allowed_fields(self) -> None:
event2 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event2:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"auth_events": [],
"content": {"membership": "join"},
"unsigned": {
"malicious garbage": "hackz",
"more warez": "more hackz",
"age": 14,
"invite_room_state": [],
},
}
filtered_event2 = event_from_pdu_json(event2, RoomVersions.V1)
self.assertIn("age", filtered_event2.unsigned)
self.assertEqual(14, filtered_event2.unsigned["age"])
self.assertNotIn("more warez", filtered_event2.unsigned)
# `invite_room_state` is allowed in events of type m.room.member
self.assertIn("invite_room_state", filtered_event2.unsigned)
self.assertEqual([], filtered_event2.unsigned["invite_room_state"])
def test_strip_event_removes_fields_based_on_event_type(self) -> None:
event3 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event3:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.power_levels",
"origin": "test.servx",
"content": {},
"auth_events": [],
"unsigned": {
"malicious garbage": "hackz",
"more warez": "more hackz",
"age": 14,
"invite_room_state": [],
},
}
filtered_event3 = event_from_pdu_json(event3, RoomVersions.V1)
self.assertIn("age", filtered_event3.unsigned)
# The `invite_room_state` field is only permitted in events of type m.room.member
self.assertNotIn("invite_room_state", filtered_event3.unsigned)
self.assertNotIn("more warez", filtered_event3.unsigned)
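# A minimal sketch of the stripping behaviour the three tests above
# exercise. The real logic lives behind `event_from_pdu_json`; the
# allow-lists below are assumptions reconstructed from the assertions in
# this class, not Synapse's actual tables.
from typing import Any, Dict, Set

_ALLOWED_UNSIGNED: Set[str] = {"age"}
_ALLOWED_UNSIGNED_BY_TYPE: Dict[str, Set[str]] = {
    "m.room.member": {"invite_room_state"},
}

def strip_unsigned(pdu: Dict[str, Any]) -> Dict[str, Any]:
    """Drop `unsigned` keys that a remote server is not allowed to set."""
    allowed = _ALLOWED_UNSIGNED | _ALLOWED_UNSIGNED_BY_TYPE.get(
        pdu.get("type", ""), set()
    )
    pdu["unsigned"] = {
        k: v for k, v in pdu.get("unsigned", {}).items() if k in allowed
    }
    return pdu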


@ -375,7 +375,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
In this test, we pretend we are processing a "pulled" event via
backfill. The pulled event successfully processes and the backward
extremeties are updated along with clearing out any failed pull attempts
extremities are updated along with clearing out any failed pull attempts
for those old extremities.
We check that we correctly cleared failed pull attempts of the
@ -807,6 +807,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
main_store = self.hs.get_datastores().main
state_deletion_store = self.hs.get_datastores().state_deletion
# Create the room.
kermit_user_id = self.register_user("kermit", "test")
@ -958,7 +959,9 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
bert_member_event.event_id: bert_member_event,
rejected_kick_event.event_id: rejected_kick_event,
},
state_res_store=StateResolutionStore(main_store),
state_res_store=StateResolutionStore(
main_store, state_deletion_store
),
)
),
[bert_member_event.event_id, rejected_kick_event.event_id],
@ -1003,7 +1006,9 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
rejected_power_levels_event.event_id,
],
event_map={},
state_res_store=StateResolutionStore(main_store),
state_res_store=StateResolutionStore(
main_store, state_deletion_store
),
full_conflicted_set=set(),
)
),


@ -23,14 +23,21 @@ from typing import Optional, cast
from unittest.mock import Mock, call
from parameterized import parameterized
from signedjson.key import generate_signing_key
from signedjson.key import (
encode_verify_key_base64,
generate_signing_key,
get_verify_key,
)
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.presence import UserDevicePresenceState, UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events.builder import EventBuilder
from synapse.api.room_versions import (
RoomVersion,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.sender import FederationSender
from synapse.handlers.presence import (
BUSY_ONLINE_TIMEOUT,
@ -45,18 +52,24 @@ from synapse.handlers.presence import (
handle_update,
)
from synapse.rest import admin
from synapse.rest.client import room
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util import Clock
from tests import unittest
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.unittest import override_config
class PresenceUpdateTestCase(unittest.HomeserverTestCase):
servlets = [admin.register_servlets]
servlets = [
admin.register_servlets,
login.register_servlets,
sync.register_servlets,
]
def prepare(
self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
@ -425,6 +438,102 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase):
wheel_timer.insert.assert_not_called()
# `rc_presence` is set very high during unit tests to avoid ratelimiting
# subtly impacting unrelated tests. We set the ratelimiting back to a
# reasonable value for the tests specific to presence ratelimiting.
@override_config(
{"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}}
)
def test_over_ratelimit_offline_to_online_to_unavailable(self) -> None:
"""
Send a presence update, check that it went through, immediately send another one and
check that it was ignored.
"""
self._test_ratelimit_offline_to_online_to_unavailable(ratelimited=True)
@override_config(
{"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}}
)
def test_within_ratelimit_offline_to_online_to_unavailable(self) -> None:
"""
Send a presence update and check that it went through, then advance time a
sufficient amount, send another presence update, and check that it also worked.
"""
self._test_ratelimit_offline_to_online_to_unavailable(ratelimited=False)
@override_config(
{"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}}
)
def _test_ratelimit_offline_to_online_to_unavailable(
self, ratelimited: bool
) -> None:
"""Test rate limit for presence updates sent with sync requests.
Args:
ratelimited: Test rate limited case.
"""
wheel_timer = Mock()
user_id = "@user:pass"
now = 5000000
sync_url = "/sync?access_token=%s&set_presence=%s"
# Register the user who syncs presence
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
# Get the handler (which kicks off a bunch of timers).
presence_handler = self.hs.get_presence_handler()
# Ensure the user is initially offline.
prev_state = UserPresenceState.default(user_id)
new_state = prev_state.copy_and_replace(
state=PresenceState.OFFLINE, last_active_ts=now
)
state, persist_and_notify, federation_ping = handle_update(
prev_state,
new_state,
is_mine=True,
wheel_timer=wheel_timer,
now=now,
persist=False,
)
# Check that the user is offline.
state = self.get_success(
presence_handler.get_state(UserID.from_string(user_id))
)
self.assertEqual(state.state, PresenceState.OFFLINE)
# Send sync request with set_presence=online.
channel = self.make_request("GET", sync_url % (access_token, "online"))
self.assertEqual(200, channel.code)
# Assert the user is now online.
state = self.get_success(
presence_handler.get_state(UserID.from_string(user_id))
)
self.assertEqual(state.state, PresenceState.ONLINE)
if not ratelimited:
# Advance time a sufficient amount to avoid rate limiting.
self.reactor.advance(30)
# Send another sync request with set_presence=unavailable.
channel = self.make_request("GET", sync_url % (access_token, "unavailable"))
self.assertEqual(200, channel.code)
state = self.get_success(
presence_handler.get_state(UserID.from_string(user_id))
)
if ratelimited:
# Assert the user is still online and presence update was ignored.
self.assertEqual(state.state, PresenceState.ONLINE)
else:
# Assert the user is now unavailable.
self.assertEqual(state.state, PresenceState.UNAVAILABLE)
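# For reference, the shape of the ratelimit override used by the tests
# above, with the semantics spelled out. With `per_second: 0.1` and
# `burst_count: 1`, tokens refill at one per ten seconds and only one
# request can be "saved up", which is why the non-ratelimited branch
# advances the reactor by 30 seconds before the second update.
RC_PRESENCE_OVERRIDE = {
    "rc_presence": {
        "per_user": {
            # One presence update allowed roughly every ten seconds...
            "per_second": 0.1,
            # ...with no extra burst capacity beyond a single request.
            "burst_count": 1,
        }
    }
}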
class PresenceTimeoutTestCase(unittest.TestCase):
"""Tests different timers and that the timer does not change `status_msg` of user."""
@ -1825,6 +1934,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
# self.event_builder_for_2.hostname = "test2"
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
@ -1940,29 +2050,35 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
hostname = get_domain_from_id(user_id)
room_version = self.get_success(self.store.get_room_version_id(room_id))
room_version = self.get_success(self.store.get_room_version(room_id))
builder = EventBuilder(
state=self.state,
event_auth_handler=self._event_auth_handler,
store=self.store,
clock=self.clock,
hostname=hostname,
signing_key=self.random_signing_key,
room_version=KNOWN_ROOM_VERSIONS[room_version],
room_id=room_id,
type=EventTypes.Member,
sender=user_id,
state_key=user_id,
content={"membership": Membership.JOIN},
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id)
)
prev_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(room_id)
# Figure out what the forward extremities in the room are (the most recent
# events that aren't tied into the DAG)
forward_extremity_event_ids = self.get_success(
self.hs.get_datastores().main.get_latest_event_ids_in_room(room_id)
)
event = self.get_success(
builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None)
event = self.create_fake_event_from_remote_server(
remote_server_name=hostname,
event_dict={
"room_id": room_id,
"sender": user_id,
"type": EventTypes.Member,
"state_key": user_id,
"depth": 1000,
"origin_server_ts": 1,
"content": {"membership": Membership.JOIN},
"auth_events": [
state_map[(EventTypes.Create, "")].event_id,
state_map[(EventTypes.JoinRules, "")].event_id,
],
"prev_events": list(forward_extremity_event_ids),
},
room_version=room_version,
)
self.get_success(self.federation_event_handler.on_receive_pdu(hostname, event))
@ -1970,3 +2086,50 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
# Check that it was successfully persisted.
self.get_success(self.store.get_event(event.event_id))
self.get_success(self.store.get_event(event.event_id))
def create_fake_event_from_remote_server(
self, remote_server_name: str, event_dict: JsonDict, room_version: RoomVersion
) -> EventBase:
"""
This is similar to what `FederatingHomeserverTestCase` is doing but we don't
need all of the extra baggage and we want to be able to create an event from
many remote servers.
"""
# poke the other server's signing key into the key store, so that we don't
# make requests for it
other_server_signature_key = generate_signing_key("test")
verify_key = get_verify_key(other_server_signature_key)
verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)
self.get_success(
self.hs.get_datastores().main.store_server_keys_response(
remote_server_name,
from_server=remote_server_name,
ts_added_ms=self.clock.time_msec(),
verify_keys={
verify_key_id: FetchKeyResult(
verify_key=verify_key,
valid_until_ts=self.clock.time_msec() + 10000,
),
},
response_json={
"verify_keys": {
verify_key_id: {"key": encode_verify_key_base64(verify_key)}
}
},
)
)
add_hashes_and_signatures(
room_version=room_version,
event_dict=event_dict,
signature_name=remote_server_name,
signing_key=other_server_signature_key,
)
event = make_event_from_dict(
event_dict,
room_version=room_version,
)
return event
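# Standalone illustration of the signing step the helper above performs,
# using the same `signedjson` and `synapse.crypto.event_signing` calls. The
# event dict here is deliberately minimal; real events carry more fields,
# as in the helper above.
from signedjson.key import generate_signing_key
from synapse.api.room_versions import RoomVersions
from synapse.crypto.event_signing import add_hashes_and_signatures

signing_key = generate_signing_key("test")
event_dict = {
    "type": "m.test",
    "room_id": "!room:remote",
    "sender": "@user:remote",
    "content": {},
}
add_hashes_and_signatures(
    room_version=RoomVersions.V10,
    event_dict=event_dict,
    signature_name="remote",
    signing_key=signing_key,
)
# `event_dict` now carries `hashes` and `signatures` entries for "remote".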


@ -17,6 +17,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
from http import HTTPStatus
from typing import Collection, ContextManager, List, Optional
from unittest.mock import AsyncMock, Mock, patch
@ -347,7 +348,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# the prev_events used when creating the join event, such that the ban does not
# precede the join.
with self._patch_get_latest_events([last_room_creation_event_id]):
self.helper.join(room_id, eve, tok=eve_token)
self.helper.join(
room_id,
eve,
tok=eve_token,
# Previously, this join would succeed but now we expect it to fail at
# this point. The rest of the test is for the case when this used to
# succeed.
expect_code=HTTPStatus.FORBIDDEN,
)
# Eve makes a second, incremental sync.
eve_incremental_sync_after_join: SyncResult = self.get_success(

View file

@ -22,14 +22,26 @@ import logging
from unittest.mock import AsyncMock, Mock
from netaddr import IPSet
from signedjson.key import (
encode_verify_key_base64,
generate_signing_key,
get_verify_key,
)
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, Membership
from synapse.events.builder import EventBuilderFactory
from synapse.api.room_versions import RoomVersion
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import EventBase, make_event_from_dict
from synapse.handlers.typing import TypingWriterHandler
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.rest.admin import register_servlets_for_client_rest_resource
from synapse.rest.client import login, room
from synapse.types import UserID, create_requester
from synapse.server import HomeServer
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import get_clock
@ -63,6 +75,9 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
ip_blocklist=IPSet(),
)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.storage_controllers = hs.get_storage_controllers()
def test_send_event_single_sender(self) -> None:
"""Test that using a single federation sender worker correctly sends a
new event.
@ -243,35 +258,92 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
self.assertTrue(sent_on_1)
self.assertTrue(sent_on_2)
def create_fake_event_from_remote_server(
self, remote_server_name: str, event_dict: JsonDict, room_version: RoomVersion
) -> EventBase:
"""
This is similar to what `FederatingHomeserverTestCase` is doing but we don't
need all of the extra baggage and we want to be able to create an event from
many remote servers.
"""
# poke the other server's signing key into the key store, so that we don't
# make requests for it
other_server_signature_key = generate_signing_key("test")
verify_key = get_verify_key(other_server_signature_key)
verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)
self.get_success(
self.hs.get_datastores().main.store_server_keys_response(
remote_server_name,
from_server=remote_server_name,
ts_added_ms=self.clock.time_msec(),
verify_keys={
verify_key_id: FetchKeyResult(
verify_key=verify_key,
valid_until_ts=self.clock.time_msec() + 10000,
),
},
response_json={
"verify_keys": {
verify_key_id: {"key": encode_verify_key_base64(verify_key)}
}
},
)
)
add_hashes_and_signatures(
room_version=room_version,
event_dict=event_dict,
signature_name=remote_server_name,
signing_key=other_server_signature_key,
)
event = make_event_from_dict(
event_dict,
room_version=room_version,
)
return event
def create_room_with_remote_server(
self, user: str, token: str, remote_server: str = "other_server"
) -> str:
room = self.helper.create_room_as(user, tok=token)
room_id = self.helper.create_room_as(user, tok=token)
store = self.hs.get_datastores().main
federation = self.hs.get_federation_event_handler()
prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room))
room_version = self.get_success(store.get_room_version(room))
room_version = self.get_success(store.get_room_version(room_id))
factory = EventBuilderFactory(self.hs)
factory.hostname = remote_server
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id)
)
# Figure out what the forward extremities in the room are (the most recent
# events that aren't tied into the DAG)
prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room_id))
user_id = UserID("user", remote_server).to_string()
event_dict = {
"type": EventTypes.Member,
"state_key": user_id,
"content": {"membership": Membership.JOIN},
"sender": user_id,
"room_id": room,
}
builder = factory.for_room_version(room_version, event_dict)
join_event = self.get_success(
builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None)
join_event = self.create_fake_event_from_remote_server(
remote_server_name=remote_server,
event_dict={
"room_id": room_id,
"sender": user_id,
"type": EventTypes.Member,
"state_key": user_id,
"depth": 1000,
"origin_server_ts": 1,
"content": {"membership": Membership.JOIN},
"auth_events": [
state_map[(EventTypes.Create, "")].event_id,
state_map[(EventTypes.JoinRules, "")].event_id,
],
"prev_events": list(prev_event_ids),
},
room_version=room_version,
)
self.get_success(federation.on_send_membership_event(remote_server, join_event))
self.replicate()
return room
return room_id


@ -2035,6 +2035,52 @@ class RoomTestCase(unittest.HomeserverTestCase):
# the create_room already does the right thing, so no need to verify that we got
# the state events it created.
def test_room_state_param(self) -> None:
"""Test that filtering by state event type works when requesting state"""
room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
channel = self.make_request(
"GET",
f"/_synapse/admin/v1/rooms/{room_id}/state?type=m.room.member",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code)
state = channel.json_body["state"]
# only one member has joined so there should be one membership event
self.assertEqual(1, len(state))
event = state[0]
self.assertEqual(event["type"], "m.room.member")
self.assertEqual(event["state_key"], self.admin_user)
def test_room_state_param_empty(self) -> None:
"""Test that passing an empty string as state filter param returns no state events"""
room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
channel = self.make_request(
"GET",
f"/_synapse/admin/v1/rooms/{room_id}/state?type=",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code)
state = channel.json_body["state"]
self.assertEqual(5, len(state))
def test_room_state_param_not_in_room(self) -> None:
"""
Test that passing a state filter param for a state event not in the room
returns no state events
"""
room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
channel = self.make_request(
"GET",
f"/_synapse/admin/v1/rooms/{room_id}/state?type=m.room.custom",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code)
state = channel.json_body["state"]
self.assertEqual(0, len(state))
def _set_canonical_alias(
self, room_id: str, test_alias: str, admin_user_tok: str
) -> None:


@ -1169,12 +1169,6 @@ class SlidingSyncTestCase(SlidingSyncBase):
self.persistence.persist_event(join_rule_event, join_rule_context)
)
# FIXME: We're manually busting the cache since
# https://github.com/element-hq/synapse/issues/17368 is not solved yet
self.store._membership_stream_cache.entity_has_changed(
user1_id, join_rule_event_pos.stream
)
# Ensure that the state reset worked and only user2 is in the room now
users_in_room = self.get_success(self.store.get_users_in_room(room_id1))
self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
@ -1322,12 +1316,6 @@ class SlidingSyncTestCase(SlidingSyncBase):
self.persistence.persist_event(join_rule_event, join_rule_context)
)
# FIXME: We're manually busting the cache since
# https://github.com/element-hq/synapse/issues/17368 is not solved yet
self.store._membership_stream_cache.entity_has_changed(
user1_id, join_rule_event_pos.stream
)
# Ensure that the state reset worked and only user2 is in the room now
users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
@ -1506,12 +1494,6 @@ class SlidingSyncTestCase(SlidingSyncBase):
self.persistence.persist_event(join_rule_event, join_rule_context)
)
# FIXME: We're manually busting the cache since
# https://github.com/element-hq/synapse/issues/17368 is not solved yet
self.store._membership_stream_cache.entity_has_changed(
user1_id, join_rule_event_pos.stream
)
# Ensure that the state reset worked and only user2 is in the room now
users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
self.assertIncludes(set(users_in_room), {user2_id}, exact=True)


@ -1,79 +0,0 @@
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
from unittest.mock import AsyncMock
from synapse.rest.client import auth_issuer
from tests.unittest import HomeserverTestCase, override_config, skip_unless
from tests.utils import HAS_AUTHLIB
ISSUER = "https://account.example.com/"
class AuthIssuerTestCase(HomeserverTestCase):
servlets = [
auth_issuer.register_servlets,
]
def test_returns_404_when_msc3861_disabled(self) -> None:
# Make an unauthenticated request for the discovery info.
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
@skip_unless(HAS_AUTHLIB, "requires authlib")
@override_config(
{
"disable_registration": True,
"experimental_features": {
"msc3861": {
"enabled": True,
"issuer": ISSUER,
"client_id": "David Lister",
"client_auth_method": "client_secret_post",
"client_secret": "Who shot Mister Burns?",
}
},
}
)
def test_returns_issuer_when_oidc_enabled(self) -> None:
# Patch the HTTP client to return the issuer metadata
req_mock = AsyncMock(return_value={"issuer": ISSUER})
self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign]
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(channel.json_body, {"issuer": ISSUER})
req_mock.assert_called_with(
"https://account.example.com/.well-known/openid-configuration"
)
req_mock.reset_mock()
# Second call it should use the cached value
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(channel.json_body, {"issuer": ISSUER})
req_mock.assert_not_called()


@ -0,0 +1,140 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2023 The Matrix.org Foundation C.I.C
# Copyright (C) 2023-2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
from http import HTTPStatus
from unittest.mock import AsyncMock
from synapse.rest.client import auth_metadata
from tests.unittest import HomeserverTestCase, override_config, skip_unless
from tests.utils import HAS_AUTHLIB
ISSUER = "https://account.example.com/"
class AuthIssuerTestCase(HomeserverTestCase):
servlets = [
auth_metadata.register_servlets,
]
def test_returns_404_when_msc3861_disabled(self) -> None:
# Make an unauthenticated request for the discovery info.
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
@skip_unless(HAS_AUTHLIB, "requires authlib")
@override_config(
{
"disable_registration": True,
"experimental_features": {
"msc3861": {
"enabled": True,
"issuer": ISSUER,
"client_id": "David Lister",
"client_auth_method": "client_secret_post",
"client_secret": "Who shot Mister Burns?",
}
},
}
)
def test_returns_issuer_when_oidc_enabled(self) -> None:
# Patch the HTTP client to return the issuer metadata
req_mock = AsyncMock(return_value={"issuer": ISSUER})
self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign]
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(channel.json_body, {"issuer": ISSUER})
req_mock.assert_called_with(
"https://account.example.com/.well-known/openid-configuration"
)
req_mock.reset_mock()
# Second call it should use the cached value
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(channel.json_body, {"issuer": ISSUER})
req_mock.assert_not_called()
class AuthMetadataTestCase(HomeserverTestCase):
servlets = [
auth_metadata.register_servlets,
]
def test_returns_404_when_msc3861_disabled(self) -> None:
# Make an unauthenticated request for the discovery info.
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
)
self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
@skip_unless(HAS_AUTHLIB, "requires authlib")
@override_config(
{
"disable_registration": True,
"experimental_features": {
"msc3861": {
"enabled": True,
"issuer": ISSUER,
"client_id": "David Lister",
"client_auth_method": "client_secret_post",
"client_secret": "Who shot Mister Burns?",
}
},
}
)
def test_returns_issuer_when_oidc_enabled(self) -> None:
# Patch the HTTP client to return the issuer metadata
req_mock = AsyncMock(
return_value={
"issuer": ISSUER,
"authorization_endpoint": "https://example.com/auth",
"token_endpoint": "https://example.com/token",
}
)
self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign]
channel = self.make_request(
"GET",
"/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(
channel.json_body,
{
"issuer": ISSUER,
"authorization_endpoint": "https://example.com/auth",
"token_endpoint": "https://example.com/token",
},
)
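# Sketch of how the discovery URL is derived from the issuer, assuming the
# standard OIDC well-known suffix seen in the mocked `get_json` calls above.
ISSUER = "https://account.example.com/"
discovery_url = ISSUER.rstrip("/") + "/.well-known/openid-configuration"
assert discovery_url == "https://account.example.com/.well-known/openid-configuration"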


@ -142,6 +142,50 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertFalse(capabilities["m.set_avatar_url"]["enabled"])
@override_config(
{
"enable_set_displayname": False,
"experimental_features": {"msc4133_enabled": True},
}
)
def test_get_set_displayname_capabilities_displayname_disabled_msc4133(
self,
) -> None:
"""Test if set displayname is disabled that the server responds it."""
access_token = self.login(self.localpart, self.password)
channel = self.make_request("GET", self.url, access_token=access_token)
capabilities = channel.json_body["capabilities"]
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertFalse(capabilities["m.set_displayname"]["enabled"])
self.assertTrue(capabilities["uk.tcpip.msc4133.profile_fields"]["enabled"])
self.assertEqual(
capabilities["uk.tcpip.msc4133.profile_fields"]["disallowed"],
["displayname"],
)
@override_config(
{
"enable_set_avatar_url": False,
"experimental_features": {"msc4133_enabled": True},
}
)
def test_get_set_avatar_url_capabilities_avatar_url_disabled_msc4133(self) -> None:
"""Test if set avatar_url is disabled that the server responds it."""
access_token = self.login(self.localpart, self.password)
channel = self.make_request("GET", self.url, access_token=access_token)
capabilities = channel.json_body["capabilities"]
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertFalse(capabilities["m.set_avatar_url"]["enabled"])
self.assertTrue(capabilities["uk.tcpip.msc4133.profile_fields"]["enabled"])
self.assertEqual(
capabilities["uk.tcpip.msc4133.profile_fields"]["disallowed"],
["avatar_url"],
)
@override_config({"enable_3pid_changes": False})
def test_get_change_3pid_capabilities_3pid_disabled(self) -> None:
"""Test if change 3pid is disabled that the server responds it."""


@ -29,6 +29,7 @@ from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
from tests.unittest import override_config
class PresenceTestCase(unittest.HomeserverTestCase):
@ -95,3 +96,54 @@ class PresenceTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(self.presence_handler.set_state.call_count, 0)
@override_config(
{"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}}
)
def test_put_presence_over_ratelimit(self) -> None:
"""
Multiple PUTs to the status endpoint without sufficient delay will be rate limited.
"""
self.hs.config.server.presence_enabled = True
body = {"presence": "here", "status_msg": "beep boop"}
channel = self.make_request(
"PUT", "/presence/%s/status" % (self.user_id,), body
)
self.assertEqual(channel.code, HTTPStatus.OK)
body = {"presence": "here", "status_msg": "beep boop"}
channel = self.make_request(
"PUT", "/presence/%s/status" % (self.user_id,), body
)
self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS)
self.assertEqual(self.presence_handler.set_state.call_count, 1)
@override_config(
{"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}}
)
def test_put_presence_within_ratelimit(self) -> None:
"""
Multiple PUTs to the status endpoint with sufficient delay should all call set_state.
"""
self.hs.config.server.presence_enabled = True
body = {"presence": "here", "status_msg": "beep boop"}
channel = self.make_request(
"PUT", "/presence/%s/status" % (self.user_id,), body
)
self.assertEqual(channel.code, HTTPStatus.OK)
# Advance time a sufficient amount to avoid rate limiting.
self.reactor.advance(30)
body = {"presence": "here", "status_msg": "beep boop"}
channel = self.make_request(
"PUT", "/presence/%s/status" % (self.user_id,), body
)
self.assertEqual(channel.code, HTTPStatus.OK)
self.assertEqual(self.presence_handler.set_state.call_count, 2)


@ -25,16 +25,20 @@ import urllib.parse
from http import HTTPStatus
from typing import Any, Dict, Optional
from canonicaljson import encode_canonical_json
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import Codes
from synapse.rest import admin
from synapse.rest.client import login, profile, room
from synapse.server import HomeServer
from synapse.storage.databases.main.profile import MAX_PROFILE_SIZE
from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
from tests.utils import USE_POSTGRES_FOR_TESTS
class ProfileTestCase(unittest.HomeserverTestCase):
@ -480,6 +484,298 @@ class ProfileTestCase(unittest.HomeserverTestCase):
# The client requested ?propagate=true, so it should have happened.
self.assertEqual(channel.json_body.get(prop), "http://my.server/pic.gif")
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_get_missing_custom_field(self) -> None:
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
)
self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_get_missing_custom_field_invalid_field_name(self) -> None:
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/[custom_field]",
)
self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_get_custom_field_rejects_bad_username(self) -> None:
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{urllib.parse.quote('@alice:')}/custom_field",
)
self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field(self) -> None:
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
content={"custom_field": "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
self.assertEqual(channel.json_body, {"custom_field": "test"})
# Overwriting the field should work.
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
content={"custom_field": "new_Value"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
self.assertEqual(channel.json_body, {"custom_field": "new_Value"})
# Deleting the field should work.
channel = self.make_request(
"DELETE",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
content={},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
)
self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_non_string(self) -> None:
"""Non-string fields are supported for custom fields."""
fields = {
"bool_field": True,
"array_field": ["test"],
"object_field": {"test": "test"},
"numeric_field": 1,
"null_field": None,
}
for key, value in fields.items():
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: value},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
channel = self.make_request(
"GET",
f"/_matrix/client/v3/profile/{self.owner}",
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
self.assertEqual(channel.json_body, {"displayname": "owner", **fields})
# Check getting individual fields works.
for key, value in fields.items():
channel = self.make_request(
"GET",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
self.assertEqual(channel.json_body, {key: value})
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_noauth(self) -> None:
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
content={"custom_field": "test"},
)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.MISSING_TOKEN)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_size(self) -> None:
"""
Attempts to set a custom field name that is too long should get a 400 error.
"""
# Key is missing.
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/",
content={"": "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Single key is too large.
key = "c" * 500
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.KEY_TOO_LARGE)
channel = self.make_request(
"DELETE",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.KEY_TOO_LARGE)
# Key doesn't match body.
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field",
content={"diff_key": "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM)
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_profile_too_long(self) -> None:
"""
Attempts to set a custom field that would push the overall profile too large.
"""
# Get right to the boundary:
# len("displayname") + len("owner") + 5 = 21 for the displayname
# 1 + 65498 + 5 for key "a" = 65504
# 2 braces, 1 comma
# 3 + 21 + 65504 = 65528 < 65536.
key = "a"
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: "a" * 65498},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
# Get the entire profile.
channel = self.make_request(
"GET",
f"/_matrix/client/v3/profile/{self.owner}",
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
canonical_json = encode_canonical_json(channel.json_body)
# 6 is the minimum bytes to store a value: 4 quotes, 1 colon, 1 comma, an empty key.
# Be one below that so we can prove we're at the boundary.
self.assertEqual(len(canonical_json), MAX_PROFILE_SIZE - 8)
# Postgres stores JSONB with whitespace, while SQLite doesn't.
if USE_POSTGRES_FOR_TESTS:
ADDITIONAL_CHARS = 0
else:
ADDITIONAL_CHARS = 1
# The next one should fail, note the value has a (JSON) length of 2.
key = "b"
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: "1" + "a" * ADDITIONAL_CHARS},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE)
# Setting an avatar or (longer) display name should not work.
channel = self.make_request(
"PUT",
f"/profile/{self.owner}/displayname",
content={"displayname": "owner12345678" + "a" * ADDITIONAL_CHARS},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE)
channel = self.make_request(
"PUT",
f"/profile/{self.owner}/avatar_url",
content={"avatar_url": "mxc://foo/bar"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE)
# Removing a single byte should work.
key = "b"
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: "" + "a" * ADDITIONAL_CHARS},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
# Finally, setting a field that already exists to a value that is <= in length should work.
key = "a"
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}",
content={key: ""},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
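# Worked size check for the boundary used above (a sketch; assumes
# MAX_PROFILE_SIZE is 65536, as implied by the arithmetic in the comments):
#   {"a":"<65498 a's>","displayname":"owner"}
#   1 ({) + 3 ("a") + 1 (:) + 65500 (quoted value) + 1 (,)
#     + 13 ("displayname") + 1 (:) + 7 ("owner") + 1 (}) = 65528
# which matches the `MAX_PROFILE_SIZE - 8` assertion above.
from canonicaljson import encode_canonical_json

profile = {"a": "a" * 65498, "displayname": "owner"}
assert len(encode_canonical_json(profile)) == 65528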
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_displayname(self) -> None:
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/displayname",
content={"displayname": "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
displayname = self._get_displayname()
self.assertEqual(displayname, "test")
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_avatar_url(self) -> None:
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/avatar_url",
content={"avatar_url": "mxc://test/good"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
avatar_url = self._get_avatar_url()
self.assertEqual(avatar_url, "mxc://test/good")
@unittest.override_config({"experimental_features": {"msc4133_enabled": True}})
def test_set_custom_field_other(self) -> None:
"""Setting someone else's profile field should fail"""
channel = self.make_request(
"PUT",
f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.other}/custom_field",
content={"custom_field": "test"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 403, channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN)
def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None:
"""Stores metadata about files in the database.


@ -742,7 +742,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
self.assertEqual(33, channel.resource_usage.db_txn_count)
self.assertEqual(36, channel.resource_usage.db_txn_count)
def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
@ -755,7 +755,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
self.assertEqual(35, channel.resource_usage.db_txn_count)
self.assertEqual(38, channel.resource_usage.db_txn_count)
def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id


@ -548,7 +548,7 @@ class RestHelper:
room_id: str,
event_type: str,
body: Dict[str, Any],
tok: Optional[str],
tok: Optional[str] = None,
expect_code: int = HTTPStatus.OK,
state_key: str = "",
) -> JsonDict:


@ -23,6 +23,7 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import NotFoundError, SynapseError
from synapse.rest.client import room
from synapse.server import HomeServer
from synapse.types.state import StateFilter
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@ -40,6 +41,8 @@ class PurgeTests(HomeserverTestCase):
self.room_id = self.helper.create_room_as(self.user_id)
self.store = hs.get_datastores().main
self.state_store = hs.get_datastores().state
self.state_deletion_store = hs.get_datastores().state_deletion
self._storage_controllers = self.hs.get_storage_controllers()
def test_purge_history(self) -> None:
@ -128,3 +131,142 @@ class PurgeTests(HomeserverTestCase):
self.store._invalidate_local_get_event_cache(create_event.event_id)
self.get_failure(self.store.get_event(create_event.event_id), NotFoundError)
self.get_failure(self.store.get_event(first["event_id"]), NotFoundError)
def test_purge_history_deletes_state_groups(self) -> None:
"""Test that unreferenced state groups get cleaned up after purge"""
# Send four state changes to the room.
first = self.helper.send_state(
self.room_id, event_type="m.foo", body={"test": 1}
)
second = self.helper.send_state(
self.room_id, event_type="m.foo", body={"test": 2}
)
third = self.helper.send_state(
self.room_id, event_type="m.foo", body={"test": 3}
)
last = self.helper.send_state(
self.room_id, event_type="m.foo", body={"test": 4}
)
# Get references to the state groups
event_to_groups = self.get_success(
self.store._get_state_group_for_events(
[
first["event_id"],
second["event_id"],
third["event_id"],
last["event_id"],
]
)
)
# Get the topological token
token = self.get_success(
self.store.get_topological_token_for_event(last["event_id"])
)
token_str = self.get_success(token.to_string(self.hs.get_datastores().main))
# Purge everything before this topological token
self.get_success(
self._storage_controllers.purge_events.purge_history(
self.room_id, token_str, True
)
)
# Advance so that the background jobs to delete the state groups runs
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# We expect all the state groups associated with events above, except
# the last one, should return no state.
state_groups = self.get_success(
self.state_store._get_state_groups_from_groups(
list(event_to_groups.values()), StateFilter.all()
)
)
first_state = state_groups[event_to_groups[first["event_id"]]]
second_state = state_groups[event_to_groups[second["event_id"]]]
third_state = state_groups[event_to_groups[third["event_id"]]]
last_state = state_groups[event_to_groups[last["event_id"]]]
self.assertEqual(first_state, {})
self.assertEqual(second_state, {})
self.assertEqual(third_state, {})
self.assertNotEqual(last_state, {})
def test_purge_unreferenced_state_group(self) -> None:
"""Test that purging a room also gets rid of unreferenced state groups
it encounters during the purge.
This is important, as otherwise these unreferenced state groups get
"de-deltaed" during the purge process, consuming lots of disk space.
"""
self.helper.send(self.room_id, body="test1")
state1 = self.helper.send_state(
self.room_id, "org.matrix.test", body={"number": 2}
)
state2 = self.helper.send_state(
self.room_id, "org.matrix.test", body={"number": 3}
)
self.helper.send(self.room_id, body="test4")
last = self.helper.send(self.room_id, body="test5")
# Create an unreferenced state group that has a prev group of one of the
# to-be-purged events.
prev_group = self.get_success(
self.store._get_state_group_for_event(state1["event_id"])
)
unreferenced_state_group = self.get_success(
self.state_store.store_state_group(
event_id=last["event_id"],
room_id=self.room_id,
prev_group=prev_group,
delta_ids={("org.matrix.test", ""): state2["event_id"]},
current_state_ids=None,
)
)
# Get the topological token
token = self.get_success(
self.store.get_topological_token_for_event(last["event_id"])
)
token_str = self.get_success(token.to_string(self.hs.get_datastores().main))
# Purge everything before this topological token
self.get_success(
self._storage_controllers.purge_events.purge_history(
self.room_id, token_str, True
)
)
# Advance so that the background jobs to delete the state groups runs
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# We expect that the unreferenced state group has been deleted.
row = self.get_success(
self.state_store.db_pool.simple_select_one_onecol(
table="state_groups",
keyvalues={"id": unreferenced_state_group},
retcol="id",
allow_none=True,
desc="test_purge_unreferenced_state_group",
)
)
self.assertIsNone(row)
# We expect there to now only be one state group for the room, which is
# the state group of the last event (as the only outlier).
state_groups = self.get_success(
self.state_store.db_pool.simple_select_onecol(
table="state_groups",
keyvalues={"room_id": self.room_id},
retcol="id",
desc="test_purge_unreferenced_state_group",
)
)
self.assertEqual(len(state_groups), 1)
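# A sketch of how state-group deltas compose, to make the "de-deltaed"
# concern above concrete. A state group stores either a full state map or a
# `prev_group` plus `delta_ids`; resolving it walks the chain and applies
# deltas on top. This is illustrative pseudocode of the concept, not
# Synapse's storage code.
from typing import Dict, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]

def resolve_state_group(
    full_maps: Dict[int, StateMap],
    prev_groups: Dict[int, Optional[int]],
    deltas: Dict[int, StateMap],
    group: int,
) -> StateMap:
    prev = prev_groups.get(group)
    if prev is None:
        # A root group stores its full state map directly.
        return dict(full_maps[group])
    # Otherwise resolve the parent, then overlay this group's delta.
    state = resolve_state_group(full_maps, prev_groups, deltas, prev)
    state.update(deltas.get(group, {}))
    return state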


@ -0,0 +1,475 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.util import Clock
from tests.test_utils.event_injection import create_event
from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
class StateDeletionStoreTestCase(HomeserverTestCase):
"""Tests for the StateDeletionStore."""
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.state_store = hs.get_datastores().state
self.state_deletion_store = hs.get_datastores().state_deletion
self.purge_events = hs.get_storage_controllers().purge_events
# We want to disable the automatic deletion of state groups in the
# background, so we can do controlled tests.
self.purge_events._delete_state_loop_call.stop()
self.user_id = self.register_user("test", "password")
tok = self.login("test", "password")
self.room_id = self.helper.create_room_as(self.user_id, tok=tok)
def check_if_can_be_deleted(self, state_group: int) -> bool:
"""Check if the state group is pending deletion."""
state_group_to_sequence_number = self.get_success(
self.state_deletion_store.get_pending_deletions([state_group])
)
can_be_deleted = self.get_success(
self.state_deletion_store.db_pool.runInteraction(
"test_existing_pending_deletion_is_cleared",
self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
state_group_to_sequence_number,
)
)
return state_group in can_be_deleted
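# The tests below drive `persisting_state_group_references` via explicit
# `__aenter__`/`__aexit__` calls so they can interleave other work (such as
# marking groups as pending deletion) between enter and exit. A sketch of
# the equivalent straight-line usage, assuming an async context:
async def persist_with_protection(state_deletion_store, event, context) -> None:
    async with state_deletion_store.persisting_state_group_references(
        [(event, context)]
    ):
        ...  # persist the event while its state groups are protected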
def test_no_deletion(self) -> None:
"""Test that calling persisting_state_group_references is fine if
nothing is pending deletion"""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
sender=self.user_id,
)
)
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
self.get_success(ctx_mgr.__aexit__(None, None, None))
def test_no_deletion_error(self) -> None:
"""Test that calling persisting_state_group_references is fine if
nothing is pending deletion, but an error occurs."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
sender=self.user_id,
)
)
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
def test_existing_pending_deletion_is_cleared(self) -> None:
"""Test that the pending deletion flag gets cleared when the state group
gets persisted."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
# Mark a state group that we're referencing as pending deletion.
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
self.get_success(ctx_mgr.__aexit__(None, None, None))
# The pending deletion flag should be cleared
pending_deletion = self.get_success(
self.state_deletion_store.db_pool.simple_select_one_onecol(
table="state_groups_pending_deletion",
keyvalues={"state_group": context.state_group},
retcol="1",
allow_none=True,
desc="test_existing_pending_deletion_is_cleared",
)
)
self.assertIsNone(pending_deletion)
def test_pending_deletion_is_cleared_during_persist(self) -> None:
"""Test that the pending deletion flag is cleared when a state group
gets marked for deletion during persistence"""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
# Mark the state group that we're referencing as pending deletion,
# *after* we have started persisting.
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
self.get_success(ctx_mgr.__aexit__(None, None, None))
# The pending deletion flag should be cleared
pending_deletion = self.get_success(
self.state_deletion_store.db_pool.simple_select_one_onecol(
table="state_groups_pending_deletion",
keyvalues={"state_group": context.state_group},
retcol="1",
allow_none=True,
desc="test_existing_pending_deletion_is_cleared",
)
)
self.assertIsNone(pending_deletion)
def test_deletion_check(self) -> None:
"""Test that the `get_state_groups_that_can_be_purged_txn` check is
correct during different points of the lifecycle of persisting an
event."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
# We shouldn't be able to delete the state group as not enough time has passed
can_be_deleted = self.check_if_can_be_deleted(context.state_group)
self.assertFalse(can_be_deleted)
# After enough time we can delete the state group
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
can_be_deleted = self.check_if_can_be_deleted(context.state_group)
self.assertTrue(can_be_deleted)
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
# But once we start persisting we can't delete the state group
can_be_deleted = self.check_if_can_be_deleted(context.state_group)
self.assertFalse(can_be_deleted)
self.get_success(ctx_mgr.__aexit__(None, None, None))
# The pending deletion flag should remain cleared after persistence has
# finished.
can_be_deleted = self.check_if_can_be_deleted(context.state_group)
self.assertFalse(can_be_deleted)
def test_deletion_error_during_persistence(self) -> None:
"""Test that state groups remain marked as pending deletion if persisting
the event fails."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
# Mark a state group that we're referencing as pending deletion.
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
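# Simulate the persist failing by exiting the context manager with an
# exception.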
self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
# We should be able to delete the state group once the deletion delay has
# passed
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
can_be_deleted = self.check_if_can_be_deleted(context.state_group)
self.assertTrue(can_be_deleted)
def test_race_between_check_and_insert(self) -> None:
"""Check that we correctly handle the race where we go to delete a
state group, check that it is unreferenced, and then it becomes
referenced just before we delete it."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
# Mark a state group that we're referencing as pending deletion.
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
# Advance time enough so we can delete the state group
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# Check that we'd be able to delete this state group.
state_group_to_sequence_number = self.get_success(
self.state_deletion_store.get_pending_deletions([context.state_group])
)
can_be_deleted = self.get_success(
self.state_deletion_store.db_pool.runInteraction(
"test_existing_pending_deletion_is_cleared",
self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
state_group_to_sequence_number,
)
)
self.assertCountEqual(can_be_deleted, [context.state_group])
# ... in the real world we'd check that the state group isn't referenced here ...
# Now we persist the event to reference the state group, *after* we
# check that the state group wasn't referenced
ctx_mgr = self.state_deletion_store.persisting_state_group_references(
[(event, context)]
)
self.get_success(ctx_mgr.__aenter__())
self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
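# Even though the persist fails, entering the context manager has bumped
# the deletion sequence number for the state group, invalidating the
# check above.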
# We simulate a pause (required to hit the race)
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# We should no longer be able to delete the state group, without having
# to recheck whether it's referenced.
can_be_deleted = self.get_success(
self.state_deletion_store.db_pool.runInteraction(
"test_existing_pending_deletion_is_cleared",
self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
state_group_to_sequence_number,
)
)
self.assertCountEqual(can_be_deleted, [])
def test_remove_ancestors_from_can_delete(self) -> None:
"""Test that if a state group is not ready to be deleted, we also don't
delete anything that is referenced by it"""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
# Create a new state group that references the one from the event
new_state_group = self.get_success(
self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=context.state_group,
delta_ids={},
current_state_ids=None,
)
)
# Mark them both as pending deletion
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group, new_state_group]
)
)
# Advance time enough that both state groups become ready for deletion.
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# We can now delete both state groups
self.assertTrue(self.check_if_can_be_deleted(context.state_group))
self.assertTrue(self.check_if_can_be_deleted(new_state_group))
# Use new_state_group as a prev_group to bump its deletion time
self.get_success(
self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=new_state_group,
delta_ids={},
current_state_ids=None,
)
)
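# Storing a state group with new_state_group as its prev_group bumps
# new_state_group's deletion time; and since new_state_group still
# references context.state_group, that ancestor must be kept too.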
# We should now not be able to delete either of the state groups.
state_group_to_sequence_number = self.get_success(
self.state_deletion_store.get_pending_deletions(
[context.state_group, new_state_group]
)
)
# Neither state group should now be ready for deletion
can_be_deleted = self.get_success(
self.state_deletion_store.db_pool.runInteraction(
"test_existing_pending_deletion_is_cleared",
self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
state_group_to_sequence_number,
)
)
self.assertCountEqual(can_be_deleted, [])
def test_newly_referenced_state_group_gets_removed_from_pending(self) -> None:
"""Check that if a state group marked for deletion becomes referenced
(without being removed from pending deletion table), it gets removed
from pending deletion table."""
event, context = self.get_success(
create_event(
self.hs,
room_id=self.room_id,
type="m.test",
state_key="",
sender=self.user_id,
)
)
assert context.state_group is not None
# Mark a state group that we're referencing as pending deletion.
self.get_success(
self.state_deletion_store.mark_state_groups_as_pending_deletion(
[context.state_group]
)
)
# Advance time enough that the state group becomes ready for deletion.
self.reactor.advance(
1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
)
# Manually insert into the table to mimic the state group getting used.
self.get_success(
self.store.db_pool.simple_insert(
table="event_to_state_groups",
values={"state_group": context.state_group, "event_id": event.event_id},
desc="test_newly_referenced_state_group_gets_removed_from_pending",
)
)
# Manually run the background task to delete pending state groups.
self.get_success(self.purge_events._delete_state_groups_loop())
# The pending deletion flag should be cleared...
pending_deletion = self.get_success(
self.state_deletion_store.db_pool.simple_select_one_onecol(
table="state_groups_pending_deletion",
keyvalues={"state_group": context.state_group},
retcol="1",
allow_none=True,
desc="test_newly_referenced_state_group_gets_removed_from_pending",
)
)
self.assertIsNone(pending_deletion)
# ... but the state should not have been deleted.
state = self.get_success(
self.state_store._get_state_for_groups([context.state_group])
)
self.assertGreater(len(state[context.state_group]), 0)

View file

@@ -1209,12 +1209,6 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
self.persistence.persist_event(join_rule_event, join_rule_context)
)
# FIXME: We're manually busting the cache since
# https://github.com/element-hq/synapse/issues/17368 is not solved yet
self.store._membership_stream_cache.entity_has_changed(
user1_id, join_rule_event_pos.stream
)
after_reset_token = self.event_sources.get_current_token()
membership_changes = self.get_success(

View file

@@ -1,378 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
from typing import Collection, List, Optional, Union
from unittest.mock import AsyncMock, Mock
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import FederationError
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import event_from_pdu_json
from synapse.handlers.device import DeviceListUpdater
from synapse.http.types import QueryParams
from synapse.logging.context import LoggingContext
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from synapse.util.retryutils import NotRetryingDestination
from tests import unittest
class MessageAcceptTests(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.http_client = Mock()
return self.setup_test_homeserver(federation_http_client=self.http_client)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
user_id = UserID("us", "test")
our_user = create_requester(user_id)
room_creator = self.hs.get_room_creation_handler()
self.room_id = self.get_success(
room_creator.create_room(
our_user, room_creator._presets_dict["public_chat"], ratelimit=False
)
)[0]
self.store = self.hs.get_datastores().main
# Figure out what the most recent event is
most_recent = next(
iter(
self.get_success(
self.hs.get_datastores().main.get_latest_event_ids_in_room(
self.room_id
)
)
)
)
join_event = make_event_from_dict(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$join:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"content": {"membership": "join"},
"auth_events": [],
"prev_state": [(most_recent, {})],
"prev_events": [(most_recent, {})],
}
)
self.handler = self.hs.get_federation_handler()
federation_event_handler = self.hs.get_federation_event_handler()
async def _check_event_auth(
origin: Optional[str], event: EventBase, context: EventContext
) -> None:
pass
federation_event_handler._check_event_auth = _check_event_auth # type: ignore[method-assign]
self.client = self.hs.get_federation_client()
async def _check_sigs_and_hash_for_pulled_events_and_fetch(
dest: str, pdus: Collection[EventBase], room_version: RoomVersion
) -> List[EventBase]:
return list(pdus)
self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( # type: ignore[method-assign]
_check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment]
)
# Send the join; it should return None (which is not an error)
self.assertEqual(
self.get_success(
federation_event_handler.on_receive_pdu("test.serv", join_event)
),
None,
)
# Make sure we actually joined the room
self.assertEqual(
self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)),
{"$join:test.serv"},
)
def test_cant_hide_direct_ancestors(self) -> None:
"""
If you send a message, you must be able to provide the direct
prev_events that said event references.
"""
async def post_json(
destination: str,
path: str,
data: Optional[JsonDict] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryParams] = None,
) -> Union[JsonDict, list]:
# If it asks us for new missing events, give them NOTHING
if path.startswith("/_matrix/federation/v1/get_missing_events/"):
return {"events": []}
return {}
self.http_client.post_json = post_json
# Figure out what the most recent event is
most_recent = next(
iter(
self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))
)
)
# Now lie about an event
lying_event = make_event_from_dict(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"event_id": "one:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.message",
"origin": "test.serv",
"content": {"body": "hewwo?"},
"auth_events": [],
"prev_events": [("two:test.serv", {}), (most_recent, {})],
}
)
federation_event_handler = self.hs.get_federation_event_handler()
with LoggingContext("test-context"):
failure = self.get_failure(
federation_event_handler.on_receive_pdu("test.serv", lying_event),
FederationError,
)
# on_receive_pdu should throw an error
self.assertEqual(
failure.value.args[0],
(
"ERROR 403: Your server isn't divulging details about prev_events "
"referenced in this event."
),
)
# Make sure the invalid event isn't there
extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))
self.assertEqual(extrem, {"$join:test.serv"})
def test_retry_device_list_resync(self) -> None:
"""Tests that device lists are marked as stale if they couldn't be synced, and
that stale device lists are retried periodically.
"""
remote_user_id = "@john:test_remote"
remote_origin = "test_remote"
# Track the number of attempts to resync the user's device list.
self.resync_attempts = 0
# When this function is called, increment the number of resync attempts (only if
# we're querying devices for the right user ID), then raise a
# NotRetryingDestination error to fail the resync gracefully.
def query_user_devices(
destination: str, user_id: str, timeout: int = 30000
) -> JsonDict:
if user_id == remote_user_id:
self.resync_attempts += 1
raise NotRetryingDestination(0, 0, destination)
# Register the mock on the federation client.
federation_client = self.hs.get_federation_client()
federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[method-assign]
# Register a mock on the store so that the incoming update doesn't fail because
# we don't share a room with the user.
store = self.hs.get_datastores().main
store.get_rooms_for_user = AsyncMock(return_value=["!someroom:test"])
# Manually inject a fake device list update. We need this update to include at
# least one prev_id so that the user's device list will need to be retried.
device_list_updater = self.hs.get_device_handler().device_list_updater
assert isinstance(device_list_updater, DeviceListUpdater)
self.get_success(
device_list_updater.incoming_device_list_update(
origin=remote_origin,
edu_content={
"deleted": False,
"device_display_name": "Mobile",
"device_id": "QBUAZIFURK",
"prev_id": [5],
"stream_id": 6,
"user_id": remote_user_id,
},
)
)
# Check that there was one resync attempt.
self.assertEqual(self.resync_attempts, 1)
# Check that the resync attempt failed and caused the user's device list to be
# marked as stale.
need_resync = self.get_success(
store.get_user_ids_requiring_device_list_resync()
)
self.assertIn(remote_user_id, need_resync)
# Check that waiting for 30 seconds caused Synapse to retry resyncing the device
# list.
self.reactor.advance(30)
self.assertEqual(self.resync_attempts, 2)
def test_cross_signing_keys_retry(self) -> None:
"""Tests that resyncing a device list correctly processes cross-signing keys from
the remote server.
"""
remote_user_id = "@john:test_remote"
remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"
remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"
# Register mock device list retrieval on the federation client.
federation_client = self.hs.get_federation_client()
federation_client.query_user_devices = AsyncMock( # type: ignore[method-assign]
return_value={
"user_id": remote_user_id,
"stream_id": 1,
"devices": [],
"master_key": {
"user_id": remote_user_id,
"usage": ["master"],
"keys": {"ed25519:" + remote_master_key: remote_master_key},
},
"self_signing_key": {
"user_id": remote_user_id,
"usage": ["self_signing"],
"keys": {
"ed25519:" + remote_self_signing_key: remote_self_signing_key
},
},
}
)
# Resync the device list.
device_handler = self.hs.get_device_handler()
self.get_success(
device_handler.device_list_updater.multi_user_device_resync(
[remote_user_id]
),
)
# Retrieve the cross-signing keys for this user.
keys = self.get_success(
self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]),
)
self.assertIn(remote_user_id, keys)
key = keys[remote_user_id]
assert key is not None
# Check that the master key is the one returned by the mock.
master_key = key["master"]
self.assertEqual(len(master_key["keys"]), 1)
self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys())
self.assertTrue(remote_master_key in master_key["keys"].values())
# Check that the self-signing key is the one returned by the mock.
self_signing_key = key["self_signing"]
self.assertEqual(len(self_signing_key["keys"]), 1)
self.assertTrue(
"ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(),
)
self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values())
class StripUnsignedFromEventsTestCase(unittest.TestCase):
def test_strip_unauthorized_unsigned_values(self) -> None:
event1 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event1:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"content": {"membership": "join"},
"auth_events": [],
"unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"},
}
filtered_event = event_from_pdu_json(event1, RoomVersions.V1)
# Make sure unauthorized fields are stripped from unsigned
self.assertNotIn("more warez", filtered_event.unsigned)
def test_strip_event_maintains_allowed_fields(self) -> None:
event2 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event2:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"auth_events": [],
"content": {"membership": "join"},
"unsigned": {
"malicious garbage": "hackz",
"more warez": "more hackz",
"age": 14,
"invite_room_state": [],
},
}
filtered_event2 = event_from_pdu_json(event2, RoomVersions.V1)
self.assertIn("age", filtered_event2.unsigned)
self.assertEqual(14, filtered_event2.unsigned["age"])
self.assertNotIn("more warez", filtered_event2.unsigned)
# invite_room_state is allowed in events of type m.room.member
self.assertIn("invite_room_state", filtered_event2.unsigned)
self.assertEqual([], filtered_event2.unsigned["invite_room_state"])
def test_strip_event_removes_fields_based_on_event_type(self) -> None:
event3 = {
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$event3:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.power_levels",
"origin": "test.servx",
"content": {},
"auth_events": [],
"unsigned": {
"malicious garbage": "hackz",
"more warez": "more hackz",
"age": 14,
"invite_room_state": [],
},
}
filtered_event3 = event_from_pdu_json(event3, RoomVersions.V1)
self.assertIn("age", filtered_event3.unsigned)
# The invite_room_state field is only permitted in events of type m.room.member
self.assertNotIn("invite_room_state", filtered_event3.unsigned)
self.assertNotIn("more warez", filtered_event3.unsigned)

View file

@@ -31,7 +31,7 @@ from typing import (
Tuple,
cast,
)
from unittest.mock import Mock
from unittest.mock import AsyncMock, Mock
from twisted.internet import defer
@@ -221,7 +221,16 @@ class Graph:
class StateTestCase(unittest.TestCase):
def setUp(self) -> None:
self.dummy_store = _DummyStore()
storage_controllers = Mock(main=self.dummy_store, state=self.dummy_store)
# Add a dummy deletion store that always returns that we have all the
# necessary state groups.
dummy_deletion_store = AsyncMock()
dummy_deletion_store.check_state_groups_and_bump_deletion.return_value = []
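# (An empty return value means that no referenced state groups are
# missing.)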
storage_controllers = Mock(
main=self.dummy_store,
state=self.dummy_store,
)
hs = Mock(
spec_set=[
"config",
@@ -241,7 +250,10 @@ class StateTestCase(unittest.TestCase):
)
clock = cast(Clock, MockClock())
hs.config = default_config("tesths", True)
hs.get_datastores.return_value = Mock(main=self.dummy_store)
hs.get_datastores.return_value = Mock(
main=self.dummy_store,
state_deletion=dummy_deletion_store,
)
hs.get_state_handler.return_value = None
hs.get_clock.return_value = clock
hs.get_macaroon_generator.return_value = MacaroonGenerator(

View file

@@ -255,3 +255,28 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
# Unknown entities will return None
self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)
def test_all_entities_changed(self) -> None:
"""
`StreamChangeCache.all_entities_changed(...)` will mark all entities as changed.
"""
cache = StreamChangeCache("#test", 1, max_size=10)
cache.entity_has_changed("user@foo.com", 2)
cache.entity_has_changed("bar@baz.net", 3)
cache.entity_has_changed("user@elsewhere.org", 4)
cache.all_entities_changed(5)
# Everything should be marked as changed before the stream position where the
# change occurred.
self.assertTrue(cache.has_entity_changed("user@foo.com", 4))
self.assertTrue(cache.has_entity_changed("bar@baz.net", 4))
self.assertTrue(cache.has_entity_changed("user@elsewhere.org", 4))
# Nothing should be marked as changed at/after the stream position where the
# change occurred. In other words, nothing has changed since the stream position
# 5.
self.assertFalse(cache.has_entity_changed("user@foo.com", 5))
self.assertFalse(cache.has_entity_changed("bar@baz.net", 5))
self.assertFalse(cache.has_entity_changed("user@elsewhere.org", 5))

View file

@@ -20,7 +20,11 @@
#
from synapse.api.errors import SynapseError
from synapse.util.stringutils import assert_valid_client_secret, base62_encode
from synapse.util.stringutils import (
assert_valid_client_secret,
base62_encode,
is_namedspaced_grammar,
)
from .. import unittest
@@ -58,3 +62,25 @@ class StringUtilsTestCase(unittest.TestCase):
self.assertEqual("10", base62_encode(62))
self.assertEqual("1c", base62_encode(100))
self.assertEqual("001c", base62_encode(100, minwidth=4))
def test_namespaced_identifier(self) -> None:
self.assertTrue(is_namedspaced_grammar("test"))
self.assertTrue(is_namedspaced_grammar("m.test"))
self.assertTrue(is_namedspaced_grammar("org.matrix.test"))
self.assertTrue(is_namedspaced_grammar("org.matrix.msc1234"))
self.assertTrue(is_namedspaced_grammar("test"))
self.assertTrue(is_namedspaced_grammar("t-e_s.t"))
# Must start with a letter.
self.assertFalse(is_namedspaced_grammar("1test"))
self.assertFalse(is_namedspaced_grammar("-test"))
self.assertFalse(is_namedspaced_grammar("_test"))
self.assertFalse(is_namedspaced_grammar(".test"))
# Must contain only a-z, 0-9, '-', '_', and '.'.
self.assertFalse(is_namedspaced_grammar("test/"))
self.assertFalse(is_namedspaced_grammar('test"'))
self.assertFalse(is_namedspaced_grammar("testö"))
# Must be no more than 255 characters.
self.assertFalse(is_namedspaced_grammar("t" * 256))

View file

@@ -200,6 +200,7 @@ def default_config(
"per_user": {"per_second": 10000, "burst_count": 10000},
},
"rc_3pid_validation": {"per_second": 10000, "burst_count": 10000},
"rc_presence": {"per_user": {"per_second": 10000, "burst_count": 10000}},
"saml2_enabled": False,
"public_baseurl": None,
"default_identity_server": None,
@@ -399,11 +400,24 @@ class TestTimeout(Exception):
class test_timeout:
"""
FIXME: This implementation is not robust against other code tight-looping and
preventing the signals propagating and timing out the test. You may need to add
`time.sleep(0.1)` to your code in order to allow this timeout to work correctly.
```py
with test_timeout(3):
while True:
my_checking_func()
time.sleep(0.1)
```
"""
def __init__(self, seconds: int, error_message: Optional[str] = None) -> None:
if error_message is None:
error_message = "test timed out after {}s.".format(seconds)
self.error_message = f"Test timed out after {seconds}s"
if error_message is not None:
self.error_message += f": {error_message}"
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum: int, frame: Optional[FrameType]) -> None:
raise TestTimeout(self.error_message)