Mirror of https://github.com/element-hq/synapse.git (synced 2025-03-13 19:28:44 +00:00)

Commit 9fd0a48ccb: Merge branch 'develop' into schema

124 changed files with 2363 additions and 795 deletions
@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 # First calculate the various trial jobs.
 #
 # For PRs, we only run each type of test with the oldest Python version supported (which
-# is Python 3.8 right now)
+# is Python 3.9 right now)

 trial_sqlite_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "all",
     }
@@ -53,12 +53,12 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
+        for version in ("3.10", "3.11", "3.12", "3.13")
     )

 trial_postgres_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "postgres",
         "postgres-version": "11",
         "extras": "all",
@@ -77,7 +77,7 @@ if not IS_PR:

 trial_no_extra_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "",
     }
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)

 # First calculate the various sytest jobs.
 #
-# For each type of test we only run on focal on PRs
+# For each type of test we only run on bullseye on PRs


 sytest_tests = [
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "postgres",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "multi-postgres",
         "workers": "workers",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "multi-postgres",
         "workers": "workers",
         "reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
     sytest_tests.extend(
         [
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "reactor": "asyncio",
             },
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "postgres": "postgres",
                 "reactor": "asyncio",
             },
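The hunks above only move the oldest tested interpreter from 3.8 to 3.9 and retag sytest images. For context, here is a minimal sketch of how a matrix script of this shape can assemble and publish a job list; the `set_output` helper and the exact keys are assumptions inferred from the hunk context (`set_output("trial_test_matrix", test_matrix)`), not a copy of the real script.

```python
import json
import os

IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")


def set_output(key: str, value) -> None:
    # Append a key=value pair to the file GitHub Actions reads step outputs from.
    with open(os.environ.get("GITHUB_OUTPUT", "github_output.txt"), "a") as f:
        f.write(f"{key}={json.dumps(value)}\n")


# On PRs we only test the oldest supported Python (3.9); otherwise fan out.
trial_sqlite_tests = [
    {"python-version": "3.9", "database": "sqlite", "extras": "all"},
]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.10", "3.11", "3.12", "3.13")
    )

set_output("trial_test_matrix", trial_sqlite_tests)
```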
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `focal` container; it
+# this script is run by GitHub Actions in a plain `jammy` container; it
 # - installs the minimal system requirements, and poetry;
 # - patches the project definition file to refer to old versions only;
 # - creates a venv with these old versions using poetry; and finally
.github/workflows/latest_deps.yml (vendored, 4 lines changed)

@@ -132,9 +132,9 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - sytest-tag: focal
+          - sytest-tag: bullseye

-          - sytest-tag: focal
+          - sytest-tag: bullseye
            postgres: postgres
            workers: workers
            redis: redis
.github/workflows/release-artifacts.yml (vendored, 44 lines changed)

@@ -91,10 +91,19 @@ jobs:
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

+      - name: Artifact name
+        id: artifact-name
+        # We can't have colons in the upload name of the artifact, so we convert
+        # e.g. `debian:sid` to `sid`.
+        env:
+          DISTRO: ${{ matrix.distro }}
+        run: |
+          echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
+
       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+        uses: actions/upload-artifact@v4
        with:
-          name: debs
+          name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
          path: debs/*

  build-wheels:
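The new step derives the artifact suffix with the shell expansion `${DISTRO#*:}`, which drops everything up to and including the first colon. A small Python illustration of the same transformation (a hypothetical helper, not part of the workflow):

```python
def artifact_suffix(distro: str) -> str:
    # Mirror ${DISTRO#*:}: drop everything up to and including the first ":",
    # so "debian:sid" -> "sid"; a value without a colon is returned unchanged.
    _, _, suffix = distro.partition(":")
    return suffix or distro


assert artifact_suffix("debian:sid") == "sid"
assert artifact_suffix("ubuntu:jammy") == "jammy"
```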
@@ -102,7 +111,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        os: [ubuntu-20.04, macos-12]
+        os: [ubuntu-22.04, macos-13]
        arch: [x86_64, aarch64]
        # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
        # It is not read by the rest of the workflow.
@@ -112,9 +121,9 @@ jobs:
        exclude:
          # Don't build macos wheels on PR CI.
          - is_pr: true
-            os: "macos-12"
+            os: "macos-13"
          # Don't build aarch64 wheels on mac.
-          - os: "macos-12"
+          - os: "macos-13"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
@@ -144,7 +153,7 @@ jobs:

      - name: Only build a single wheel on PR
        if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

      - name: Build wheels
        run: python -m cibuildwheel --output-dir wheelhouse
@@ -156,9 +165,9 @@ jobs:
          CARGO_NET_GIT_FETCH_WITH_CLI: true
          CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI

-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@v4
        with:
-          name: Wheel
+          name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
          path: ./wheelhouse/*.whl

  build-sdist:
@@ -177,7 +186,7 @@ jobs:
      - name: Build sdist
        run: python -m build --sdist

-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@v4
        with:
          name: Sdist
          path: dist/*.tar.gz
@@ -194,19 +203,20 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
+        uses: actions/download-artifact@v4
      - name: Build a tarball for the debs
-        run: tar -cvJf debs.tar.xz debs
+        # We need to merge all the debs uploads into one folder, then compress
+        # that.
+        run: |
+          mkdir debs
+          mv debs*/* debs/
+          tar -cvJf debs.tar.xz debs
      - name: Attach to release
-        uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
+        uses: softprops/action-gh-release@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          files: |
            Sdist/*
-            Wheel/*
+            Wheel*/*
            debs.tar.xz
          # if it's not already published, keep the release as a draft.
          draft: true
          # mark it as a prerelease if the tag contains 'rc'.
          prerelease: ${{ contains(github.ref, 'rc') }}
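Because upload-artifact v4 gives each distro its own `debs-*` artifact, the release job now merges those directories before compressing them. The same steps can be expressed in Python; this is only an illustrative sketch (the real workflow uses the shell commands shown above):

```python
import shutil
import tarfile
from pathlib import Path

# Gather every debs-*/ artifact directory into a single debs/ folder,
# then compress the merged folder as debs.tar.xz.
dest = Path("debs")
dest.mkdir(exist_ok=True)
for artifact_dir in Path(".").glob("debs*"):
    if artifact_dir.is_dir() and artifact_dir != dest:
        for f in artifact_dir.iterdir():
            shutil.move(str(f), dest / f.name)

with tarfile.open("debs.tar.xz", "w:xz") as tar:
    tar.add(dest, arcname="debs")
```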
.github/workflows/tests.yml (vendored, 14 lines changed)

@@ -397,7 +397,7 @@ jobs:
    needs:
      - linting-done
      - changes
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4

@@ -409,12 +409,12 @@ jobs:
      # their build dependencies
      - run: |
          sudo apt-get -qq update
-          sudo apt-get -qq install build-essential libffi-dev python-dev \
+          sudo apt-get -qq install build-essential libffi-dev python3-dev \
            libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

      - uses: actions/setup-python@v5
        with:
-          python-version: '3.8'
+          python-version: '3.9'

      - name: Prepare old deps
        if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        python-version: ["pypy-3.8"]
+        python-version: ["pypy-3.9"]
        extras: ["all"]

    steps:
@@ -580,11 +580,11 @@ jobs:
    strategy:
      matrix:
        include:
-          - python-version: "3.8"
+          - python-version: "3.9"
            postgres-version: "11"

-          - python-version: "3.11"
-            postgres-version: "15"
+          - python-version: "3.13"
+            postgres-version: "17"

    services:
      postgres:
.github/workflows/twisted_trunk.yml (vendored, 4 lines changed)

@@ -99,11 +99,11 @@ jobs:
    if: needs.check_repo.outputs.should_run_workflow == 'true'
    runs-on: ubuntu-latest
    container:
-      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+      # We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
      # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
      # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
      # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
-      image: matrixdotorg/sytest-synapse:focal
+      image: matrixdotorg/sytest-synapse:bullseye
      volumes:
        - ${{ github.workspace }}:/src
CHANGES.md (112 lines changed)

@@ -1,3 +1,115 @@
|
|||
# Synapse 1.120.0rc1 (2024-11-20)
|
||||
|
||||
This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the
|
||||
homeserver's media store.
|
||||
|
||||
Most homeservers operating in the public federation will not be impacted by this change, given that
|
||||
the large homeserver `matrix.org` enabled this in September 2024 and therefore most clients and servers
|
||||
will already have updated as a result.
|
||||
|
||||
Some server administrators may still wish to disable this enforcement for the time being, in the interest of compatibility with older clients
|
||||
and older federated homeservers.
|
||||
See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html#authenticated-media-is-now-enforced-by-default) for more information.
|
||||
|
||||
### Features
|
||||
|
||||
- Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. ([\#17889](https://github.com/element-hq/synapse/issues/17889))
|
||||
- Add a one-off task to delete old One-Time Keys, to guard against us having old OTKs in the database that the client has long forgotten about. ([\#17934](https://github.com/element-hq/synapse/issues/17934))
|
||||
|
||||
### Improved Documentation
|
||||
|
||||
- Clarify the semantics of the `enable_authenticated_media` configuration option. ([\#17913](https://github.com/element-hq/synapse/issues/17913))
|
||||
- Add documentation about backing up Synapse. ([\#17931](https://github.com/element-hq/synapse/issues/17931))
|
||||
|
||||
### Deprecations and Removals
|
||||
|
||||
- Remove support for [MSC3886: Simple client rendezvous capability](https://github.com/matrix-org/matrix-spec-proposals/pull/3886), which has been superseded by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) and therefore closed. ([\#17638](https://github.com/element-hq/synapse/issues/17638))
|
||||
|
||||
### Internal Changes
|
||||
|
||||
- Addressed some typos in docs and returned error message for unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865))
|
||||
- Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923))
|
||||
- Bump macOS version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924))
|
||||
- Move server event filtering logic to Rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928))
|
||||
- Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932))
|
||||
- Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946))
|
||||
|
||||
### Updates to locked dependencies
|
||||
|
||||
* Bump anyhow from 1.0.92 to 1.0.93. ([\#17920](https://github.com/element-hq/synapse/issues/17920))
|
||||
* Bump bleach from 6.1.0 to 6.2.0. ([\#17918](https://github.com/element-hq/synapse/issues/17918))
|
||||
* Bump immutabledict from 4.2.0 to 4.2.1. ([\#17941](https://github.com/element-hq/synapse/issues/17941))
|
||||
* Bump packaging from 24.1 to 24.2. ([\#17940](https://github.com/element-hq/synapse/issues/17940))
|
||||
* Bump phonenumbers from 8.13.49 to 8.13.50. ([\#17942](https://github.com/element-hq/synapse/issues/17942))
|
||||
* Bump pygithub from 2.4.0 to 2.5.0. ([\#17917](https://github.com/element-hq/synapse/issues/17917))
|
||||
* Bump ruff from 0.7.2 to 0.7.3. ([\#17919](https://github.com/element-hq/synapse/issues/17919))
|
||||
* Bump serde from 1.0.214 to 1.0.215. ([\#17938](https://github.com/element-hq/synapse/issues/17938))
|
||||
|
||||
# Synapse 1.119.0 (2024-11-13)
|
||||
|
||||
No significant changes since 1.119.0rc2.
|
||||
|
||||
### Python 3.8 support dropped
|
||||
|
||||
Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.
|
||||
|
||||
If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.
|
||||
|
||||
|
||||
# Synapse 1.119.0rc2 (2024-11-11)
|
||||
|
||||
Note that due to packaging issues there was no v1.119.0rc1.
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374))
|
||||
- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888))
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809))
|
||||
- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839))
|
||||
- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
|
||||
the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847))
|
||||
- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850))
|
||||
- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861))
|
||||
- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903))
|
||||
- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915))
|
||||
|
||||
### Internal Changes
|
||||
|
||||
- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908))
|
||||
- Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786))
|
||||
- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890))
|
||||
- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830))
|
||||
- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852))
|
||||
- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884))
|
||||
- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887))
|
||||
- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894))
|
||||
- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905))
|
||||
- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
|
||||
- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
|
||||
- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
|
||||
- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
|
||||
- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921))
|
||||
|
||||
### Updates to locked dependencies
|
||||
|
||||
* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657))
|
||||
* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901))
|
||||
* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877))
|
||||
* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853))
|
||||
* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898))
|
||||
* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899))
|
||||
* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879))
|
||||
* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874))
|
||||
* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897))
|
||||
* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900))
|
||||
* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857))
|
||||
* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855))
|
||||
* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856))
|
||||
|
||||
# Synapse 1.118.0 (2024-10-29)
|
||||
|
||||
No significant changes since 1.118.0rc1.
|
||||
|
|
Cargo.lock (generated, 16 lines changed)

@ -13,9 +13,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.92"
|
||||
version = "1.0.93"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13"
|
||||
checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
|
@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
|||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.214"
|
||||
version = "1.0.215"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5"
|
||||
checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.214"
|
||||
version = "1.0.215"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766"
|
||||
checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
@ -505,9 +505,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.132"
|
||||
version = "1.0.133"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
|
||||
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
|
|
|
@@ -1 +0,0 @@
-Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API.

@@ -1 +0,0 @@
-Add a test for downloading and thumbnailing a CMYK JPEG.

@@ -1 +0,0 @@
-Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Avoid lost data on some database query retries.

@@ -1 +0,0 @@
-Include the destination in the error of 'Destination mismatch' on federation requests.

@@ -1 +0,0 @@
-Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi.

@@ -1,2 +0,0 @@
-Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
-the config option `run_background_tasks_on`.

@@ -1 +0,0 @@
-Fix detection when the built Rust library was outdated when using source installations.

changelog.d/17872.doc (new file)
@@ -0,0 +1 @@
+Add OIDC example configuration for Forgejo (fork of Gitea).

@@ -1 +0,0 @@
-Minor speed-up of sliding sync by computing extensions results in parallel.

@@ -1 +0,0 @@
-Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12.

@@ -1 +0,0 @@
-Remove usage of internal header encoding API.

changelog.d/17933.bugfix (new file)
@@ -0,0 +1 @@
+Fix long-standing bug where read receipts could get overly delayed being sent over federation.

changelog.d/17936.misc (new file)
@@ -0,0 +1 @@
+Fix incorrect comment in new schema delta.

changelog.d/17944.misc (new file)
@@ -0,0 +1 @@
+Raise setuptools_rust version cap to 1.10.2.

changelog.d/17945.misc (new file)
@@ -0,0 +1 @@
+Enable encrypted appservice related experimental features in the complement docker image.

changelog.d/17952.misc (new file)
@@ -0,0 +1 @@
+Return whether the user is suspended when querying the user account in the Admin API.

changelog.d/17953.doc (new file)
@@ -0,0 +1 @@
+Link to element-docker-demo from contrib/docker*.
@@ -30,3 +30,6 @@ docker-compose up -d
 ### More information

 For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
+
+**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see
+https://github.com/element-hq/element-docker-demo**

@@ -8,6 +8,9 @@ All examples and snippets assume that your Synapse service is called `synapse`

 An example Docker Compose file can be found [here](docker-compose.yaml).
+
+**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this
+docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo**

 ## Worker Service Examples in Docker Compose

 In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
debian/changelog (vendored, 24 lines changed)

@@ -1,3 +1,27 @@
+matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.120.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 20 Nov 2024 15:02:21 +0000
+
+matrix-synapse-py3 (1.119.0) stable; urgency=medium
+
+  * New Synapse release 1.119.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000
+
+matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.119.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000
+
+matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.119.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700
+
 matrix-synapse-py3 (1.118.0) stable; urgency=medium

   * New Synapse release 1.118.0.
|
@@ -104,6 +104,16 @@ experimental_features:
   msc3967_enabled: true
+  # Expose a room summary for public rooms
+  msc3266_enabled: true
+  # Send to-device messages to application services
+  msc2409_to_device_messages_enabled: true
+  # Allow application services to masquerade devices
+  msc3202_device_masquerading: true
+  # Sending device list changes, one-time key counts and fallback key usage to application services
+  msc3202_transaction_extensions: true
+  # Proxy OTK claim requests to exclusive ASes
+  msc3983_appservice_otk_claims: true
+  # Proxy key queries to exclusive ASes
+  msc3984_appservice_key_query: true

 server_notices:
   system_mxid_localpart: _server
|
@@ -54,6 +54,7 @@
   - [Using `synctl` with Workers](synctl_workers.md)
   - [Systemd](systemd-with-workers/README.md)
 - [Administration](usage/administration/README.md)
+  - [Backups](usage/administration/backups.md)
   - [Admin API](usage/administration/admin_api/README.md)
     - [Account Validity](admin_api/account_validity.md)
     - [Background Updates](usage/administration/admin_api/background_updates.md)
|
@@ -5,6 +5,7 @@ basis. The currently supported features are:
 - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
   for another client
 - [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
+- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2

 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
|
@ -55,7 +55,8 @@ It returns a JSON body like the following:
|
|||
}
|
||||
],
|
||||
"user_type": null,
|
||||
"locked": false
|
||||
"locked": false,
|
||||
"suspended": false
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
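The new `suspended` flag is returned alongside `locked` when querying a user account. A short sketch of reading it, assuming the standard `GET /_synapse/admin/v2/users/<user_id>` Admin API endpoint; the server URL, token and user ID below are placeholders:

```python
import requests

BASE_URL = "https://matrix.example.com"   # your homeserver (placeholder)
ADMIN_TOKEN = "syt_..."                   # a server admin's access token (placeholder)
USER_ID = "@alice:example.com"            # user to query (placeholder)

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v2/users/{USER_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
account = resp.json()
# As of this change the response body includes both booleans.
print(account["locked"], account["suspended"])
```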
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
 configuration:

 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
 ```
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -336,6 +336,36 @@ but it has a `response_types_supported` which excludes "code" (which we rely on,
 is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
 so we have to disable discovery and configure the URIs manually.

+### Forgejo
+
+Forgejo is a fork of Gitea that can act as an OAuth2 provider.
+
+The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`.
+
+Synapse config:
+
+```yaml
+oidc_providers:
+  - idp_id: forgejo
+    idp_name: Forgejo
+    discover: false
+    issuer: "https://your-forgejo.com/"
+    client_id: "your-client-id" # TO BE FILLED
+    client_secret: "your-client-secret" # TO BE FILLED
+    client_auth_method: client_secret_post
+    scopes: ["openid", "profile", "email", "groups"]
+    authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize"
+    token_endpoint: "https://your-forgejo.com/login/oauth/access_token"
+    userinfo_endpoint: "https://your-forgejo.com/api/v1/user"
+    user_mapping_provider:
+      config:
+        subject_claim: "sub"
+        picture_claim: "picture"
+        localpart_template: "{{ user.preferred_username }}"
+        display_name_template: "{{ user.name }}"
+        email_template: "{{ user.email }}"
+```
+
 ### GitHub

 [GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
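The `*_template` options under `user_mapping_provider.config` are Jinja templates rendered against the provider's userinfo response. A tiny illustration of how the localpart and display name come out for a made-up Forgejo user (the payload below is invented for the example):

```python
from jinja2 import Template

# Hypothetical userinfo payload from the provider's userinfo endpoint.
user = {"preferred_username": "alice", "name": "Alice Example", "email": "alice@example.com"}

localpart = Template("{{ user.preferred_username }}").render(user=user)
display_name = Template("{{ user.name }}").render(user=user)
print(localpart, display_name)  # alice Alice Example
```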
@@ -100,6 +100,10 @@ database:
     keepalives_count: 3
 ```

+## Backups
+
+Don't forget to [back up](./usage/administration/backups.md#database) your database!
+
 ## Tuning Postgres

 The default settings should be fine for most deployments. For larger
@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
 System requirements:

 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.8 or later, up to Python 3.11.
+- Python 3.9 or later, up to Python 3.13.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

 If building on an uncommon architecture for which pre-built wheels are
|
|||
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
|
||||
means `apt-get install libxml2-dev`, or equivalent for your OS.
|
||||
|
||||
### Backups
|
||||
|
||||
Don't forget to take [backups](../usage/administration/backups.md) of your new server!
|
||||
|
||||
### Troubleshooting Installation
|
||||
|
||||
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
|
||||
|
|
|
@@ -117,6 +117,51 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).

+# Upgrading to v1.120.0
+
+## Removal of experimental MSC3886 feature
+
+[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
+has been closed (and will not enter the Matrix spec). As such, we are
+removing the experimental support for it in this release.
+
+The `experimental_features.msc3886_endpoint` configuration option has
+been removed.
+
+## Authenticated media is now enforced by default
+
+The [`enable_authenticated_media`] configuration option now defaults to true.
+
+This means that clients and remote (federated) homeservers now need to use
+the authenticated media endpoints in order to download media from your
+homeserver.
+
+As an exception, existing media that was stored on the server prior to
+this option changing to `true` will still be accessible over the
+unauthenticated endpoints.
+
+The matrix.org homeserver has already been running with this option enabled
+since September 2024, so most common clients and homeservers should already
+be compatible.
+
+With that said, administrators who wish to disable this feature for broader
+compatibility can still do so by manually configuring
+`enable_authenticated_media: False`.
+
+[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media
+
+
 # Upgrading to v1.119.0

 ## Minimum supported Python version

 The minimum supported Python version has been increased from v3.8 to v3.9.
 You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).

 If you use current versions of the Matrix.org-distributed Docker images, no action is required.
 Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.


 # Upgrading to v1.111.0

 ## New worker endpoints for authenticated client and federation media
docs/usage/administration/backups.md (new file, 125 lines)

@@ -0,0 +1,125 @@
# How to back up a Synapse homeserver

It is critical to maintain good backups of your server, to guard against
hardware failure as well as potential corruption due to bugs or administrator
error.

This page documents the things you will need to consider backing up as part of
a Synapse installation.

## Configuration files

Keep a copy of your configuration file (`homeserver.yaml`), as well as any
auxiliary config files it refers to such as the
[`log_config`](../configuration/config_documentation.md#log_config) file,
[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files).
Often, all such config files will be kept in a single directory such as
`/etc/synapse`, which will make this easier.

## Server signing key

Your server has a [signing
key](../configuration/config_documentation.md#signing_key_path) which it uses
to sign events and outgoing federation requests. It is easiest to back it up
with your configuration files, but an alternative is to have Synapse create a
new signing key if you have to restore.

If you do decide to replace the signing key, you should add the old *public*
key to
[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys).

## Database

Synapse's support for SQLite is only suitable for testing purposes, so for the
purposes of this document, we'll assume you are using
[PostgreSQL](../../postgres.md).

A full discussion of backup strategies for PostgreSQL is out of scope for this
document; see the [PostgreSQL
documentation](https://www.postgresql.org/docs/current/backup.html) for
detailed information.

### Synapse-specific details

* Be very careful not to restore into a database that already has tables
  present. At best, this will error; at worst, it will lead to subtle database
  inconsistencies.

* The `e2e_one_time_keys_json` table should **not** be backed up, or if it is
  backed up, should be
  [`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html)
  after restoring the database before Synapse is started.

  [Background: restoring the database to an older backup can cause
  used one-time-keys to be re-issued, causing subsequent [message decryption
  errors](https://github.com/element-hq/element-meta/issues/2155). Clearing
  all one-time-keys from the database ensures that this cannot happen, and
  will prompt clients to generate and upload new one-time-keys.]

### Quick and easy database backup and restore

Typically, the easiest solution is to use `pg_dump` to take a copy of the whole
database. We recommend `pg_dump`'s custom dump format, as it produces
significantly smaller backup files.

```shell
sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump
```

There is no need to stop Postgres or Synapse while `pg_dump` is running: it
will take a consistent snapshot of the database.

To restore, you will need to recreate the database as described in [Using
Postgres](../../postgres.md#set-up-database),
then load the dump into it with `pg_restore`:

```shell
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres pg_restore -d synapse < synapse.dump
```

(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember
to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before
starting Synapse.)

To reiterate: do **not** restore a dump over an existing database.

Again, if you plan to run your homeserver at any sort of production level, we
recommend studying the PostgreSQL documentation on backup options.

## Media store

Synapse keeps a copy of media uploaded by users, including avatars and message
attachments, in its [Media
store](../configuration/config_documentation.md#media-store).

It is a directory on the local disk, containing the following directories:

* `local_content`: this is content uploaded by your local users. As a general
  rule, you should back this up: it may represent the only copy of those
  media files anywhere in the federation, and if they are lost, users will
  see errors when viewing user or room avatars, and messages with attachments.

* `local_thumbnails`: "thumbnails" of images uploaded by your users. If
  [`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails)
  is enabled, these will be regenerated if they are removed from the disk, and
  there is therefore no need to back them up.

  If `dynamic_thumbnails` is *not* enabled (the default): although this can
  theoretically be regenerated from `local_content`, there is no tooling to do
  so. We recommend that these are backed up too.

* `remote_content`: this is a cache of content that was uploaded by a user on
  another server, and has since been requested by a user on your own server.

  Typically there is no need to back up this directory: if a file in this directory
  is removed, Synapse will attempt to fetch it again from the remote
  server.

* `remote_thumbnails`: thumbnails of images uploaded by users on other
  servers. As with `remote_content`, there is normally no need to back this
  up.

* `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded
  by the [URL previews](../../setup/installation.md#url-previews) feature.
  These do not need to be backed up.
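If you script your backups, the dump step documented above can be wrapped in a few lines. This is only a sketch of the same `pg_dump` invocation, assuming the default `postgres` superuser and a database named `synapse` as in the commands above:

```python
import subprocess

def dump_synapse_db(out_path: str = "synapse.dump", database: str = "synapse") -> None:
    # Custom-format dump that skips the contents of e2e_one_time_keys_json
    # (that table should be truncated before starting Synapse after a restore).
    with open(out_path, "wb") as out:
        subprocess.run(
            [
                "sudo", "-u", "postgres",
                "pg_dump", "-Fc",
                "--exclude-table-data", "e2e_one_time_keys_json",
                database,
            ],
            stdout=out,
            check=True,
        )

if __name__ == "__main__":
    dump_synapse_db()
```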
@@ -1812,11 +1812,26 @@ Config options related to Synapse's media store.
 ---
 ### `enable_authenticated_media`

-*(boolean)* When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false after enabling, media marked as authenticated will be available over legacy endpoints. Will default to true in a future Synapse release. Defaults to `false`.
+*(boolean)* When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.
+
+In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this case-by-case breakdown describes whether media downloads are permitted:
+
+* `enable_authenticated_media = False`:
+  * unauthenticated client or homeserver requesting local media: allowed
+  * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, or as long as the remote homeserver does not require authentication to retrieve the media
+* `enable_authenticated_media = True`:
+  * unauthenticated client or homeserver requesting local media: allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
+  * unauthenticated client or homeserver requesting remote media: the same as for local media; allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
+
+It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`, will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`. This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media; those older clients or homeservers will not be cut off from media they can already see.
+
+_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.
+
+Defaults to `true`.

 Example configuration:
 ```yaml
-enable_authenticated_media: true
+enable_authenticated_media: false
 ```
 ---
 ### `enable_media_repo`

@@ -2911,6 +2926,13 @@ signing_key_path: CONFDIR/SERVERNAME.signing.key

 It is possible to build an entry from an old `signing.key` file using the `export_signing_key` script which is provided with synapse.

+If you have lost the private key file, you can ask another server you trust to tell you the public keys it has seen from your server. To fetch the keys from `matrix.org`, try something like:
+
+```
+curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
+jq '.server_keys | map(.verify_keys) | add'
+```
+
 Defaults to `{}`.

 Example configuration:
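The case-by-case breakdown above boils down to a small decision rule for unauthenticated download requests. The function below is an illustrative sketch of those documented rules only, not Synapse's actual implementation:

```python
def unauthenticated_download_allowed(
    enable_authenticated_media: bool,
    stored_while_unauthenticated: bool,
    is_remote: bool,
    remote_requires_auth: bool = False,
    in_cache: bool = True,
) -> bool:
    """Sketch of the documented rules for unauthenticated media downloads."""
    if not enable_authenticated_media:
        if is_remote:
            # Allowed if cached, or if the remote server serves it unauthenticated.
            return in_cache or not remote_requires_auth
        return True
    # With enforcement on, only media stored while the option was off (or before
    # it existed) remains reachable over the legacy endpoints.
    return stored_while_unauthenticated


# Old local media stays available even after enforcement is enabled;
# newly uploaded media does not.
assert unauthenticated_download_allowed(True, True, is_remote=False)
assert not unauthenticated_download_allowed(True, False, is_remote=False)
```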
flake.lock (generated, 56 lines changed)

@ -56,24 +56,6 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681202837,
|
||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
|
@ -186,27 +168,27 @@
|
|||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1690535733,
|
||||
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
|
||||
"lastModified": 1729265718,
|
||||
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
|
||||
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "master",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681358109,
|
||||
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
|
||||
"lastModified": 1728538411,
|
||||
"narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
|
||||
"rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -249,20 +231,19 @@
|
|||
"devenv": "devenv",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"rust-overlay": "rust-overlay",
|
||||
"systems": "systems_3"
|
||||
"systems": "systems_2"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": "nixpkgs_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1693966243,
|
||||
"narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
|
||||
"lastModified": 1731897198,
|
||||
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
|
||||
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -300,21 +281,6 @@
|
|||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
|
|
flake.nix (26 lines changed)

@ -3,13 +3,13 @@
|
|||
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
|
||||
# installed automatically.
|
||||
#
|
||||
# You must have already installed Nix (https://nixos.org) on your system to use this.
|
||||
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
|
||||
# directly supported, but Nix can be installed inside of WSL2 or even Docker
|
||||
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
|
||||
# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
|
||||
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
|
||||
# containers. Please refer to https://nixos.org/download for details.
|
||||
#
|
||||
# You must also enable support for flakes in Nix. See the following for how to
|
||||
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
|
||||
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
|
||||
#
|
||||
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
|
||||
#
|
||||
|
@ -20,7 +20,7 @@
|
|||
# locally from "services", such as PostgreSQL and Redis.
|
||||
#
|
||||
# You should now be dropped into a new shell with all programs and dependencies
|
||||
# availabile to you!
|
||||
# available to you!
|
||||
#
|
||||
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
|
||||
# running: `devenv up`. To stop them, use Ctrl-C.
|
||||
|
@ -39,9 +39,9 @@
|
|||
|
||||
{
|
||||
inputs = {
|
||||
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
|
||||
# Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
|
||||
# available versions of packages.
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/master";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
|
||||
systems.url = "github:nix-systems/default";
|
||||
# A development environment manager built on Nix. See https://devenv.sh.
|
||||
|
@ -50,7 +50,7 @@
|
|||
rust-overlay.url = "github:oxalica/rust-overlay";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
|
||||
outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
|
||||
let
|
||||
forEachSystem = nixpkgs.lib.genAttrs (import systems);
|
||||
in {
|
||||
|
@ -82,7 +82,7 @@
|
|||
#
|
||||
# NOTE: We currently need to set the Rust version unnecessarily high
|
||||
# in order to work around https://github.com/matrix-org/synapse/issues/15939
|
||||
(rust-bin.stable."1.71.1".default.override {
|
||||
(rust-bin.stable."1.82.0".default.override {
|
||||
# Additionally install the "rust-src" extension to allow diving into the
|
||||
# Rust source code in an IDE (rust-analyzer will also make use of it).
|
||||
extensions = [ "rust-src" ];
|
||||
|
@ -126,7 +126,7 @@
|
|||
# Automatically activate the poetry virtualenv upon entering the shell.
|
||||
languages.python.poetry.activate.enable = true;
|
||||
# Install all extra Python dependencies; this is needed to run the unit
|
||||
# tests and utilitise all Synapse features.
|
||||
# tests and utilise all Synapse features.
|
||||
languages.python.poetry.install.arguments = ["--extras all"];
|
||||
# Install the 'matrix-synapse' package from the local checkout.
|
||||
languages.python.poetry.install.installRootPackage = true;
|
||||
|
@ -163,8 +163,8 @@
|
|||
# Create a postgres user called 'synapse_user' which has ownership
|
||||
# over the 'synapse' database.
|
||||
services.postgres.initialScript = ''
|
||||
CREATE USER synapse_user;
|
||||
ALTER DATABASE synapse OWNER TO synapse_user;
|
||||
CREATE USER synapse_user;
|
||||
ALTER DATABASE synapse OWNER TO synapse_user;
|
||||
'';
|
||||
|
||||
# Redis is needed in order to run Synapse in worker mode.
|
||||
|
@ -205,7 +205,7 @@
|
|||
# corresponding Nix packages on https://search.nixos.org/packages.
|
||||
#
|
||||
# This was done until `./install-deps.pl --dryrun` produced no output.
|
||||
env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
|
||||
env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [
|
||||
DBI
|
||||
ClassMethodModifiers
|
||||
CryptEd25519
|
||||
|
|
mypy.ini (2 lines changed)

@@ -26,7 +26,7 @@ strict_equality = True

 # Run mypy type checking with the minimum supported Python version to catch new usage
 # that isn't backwards-compatible (types, overloads, etc).
-python_version = 3.8
+python_version = 3.9

 files =
   docker/,
poetry.lock (generated, 131 lines changed)

@ -11,9 +11,6 @@ files = [
|
|||
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
|
||||
|
||||
[[package]]
|
||||
name = "attrs"
|
||||
version = "24.2.0"
|
||||
|
@ -107,21 +104,20 @@ typecheck = ["mypy"]
|
|||
|
||||
[[package]]
|
||||
name = "bleach"
|
||||
version = "6.1.0"
|
||||
version = "6.2.0"
|
||||
description = "An easy safelist-based HTML-sanitizing tool."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
|
||||
{file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
|
||||
{file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
|
||||
{file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
six = ">=1.9.0"
|
||||
webencodings = "*"
|
||||
|
||||
[package.extras]
|
||||
css = ["tinycss2 (>=1.1.0,<1.3)"]
|
||||
css = ["tinycss2 (>=1.1.0,<1.5)"]
|
||||
|
||||
[[package]]
|
||||
name = "canonicaljson"
|
||||
|
@ -728,13 +724,13 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "immutabledict"
|
||||
version = "4.2.0"
|
||||
version = "4.2.1"
|
||||
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"},
|
||||
{file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"},
|
||||
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
|
||||
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -874,9 +870,7 @@ files = [
|
|||
|
||||
[package.dependencies]
|
||||
attrs = ">=22.2.0"
|
||||
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
|
||||
jsonschema-specifications = ">=2023.03.6"
|
||||
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
|
||||
referencing = ">=0.28.4"
|
||||
rpds-py = ">=0.7.1"
|
||||
|
||||
|
@ -896,7 +890,6 @@ files = [
|
|||
]
|
||||
|
||||
[package.dependencies]
|
||||
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
|
||||
referencing = ">=0.28.0"
|
||||
|
||||
[[package]]
|
||||
|
@ -912,7 +905,6 @@ files = [
|
|||
|
||||
[package.dependencies]
|
||||
importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
|
||||
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
|
||||
"jaraco.classes" = "*"
|
||||
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
|
||||
pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
|
||||
|
@ -1426,13 +1418,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
|
|||
|
||||
[[package]]
|
||||
name = "packaging"
|
||||
version = "24.1"
|
||||
version = "24.2"
|
||||
description = "Core utilities for Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
|
||||
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
|
||||
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
|
||||
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -1451,13 +1443,13 @@ dev = ["jinja2"]
|
|||
|
||||
[[package]]
|
||||
name = "phonenumbers"
|
||||
version = "8.13.49"
|
||||
version = "8.13.50"
|
||||
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "phonenumbers-8.13.49-py2.py3-none-any.whl", hash = "sha256:e17140955ab3d8f9580727372ea64c5ada5327932d6021ef6fd203c3db8c8139"},
|
||||
{file = "phonenumbers-8.13.49.tar.gz", hash = "sha256:e608ccb61f0bd42e6db1d2c421f7c22186b88f494870bf40aa31d1a2718ab0ae"},
|
||||
{file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"},
|
||||
{file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -1571,17 +1563,6 @@ files = [
|
|||
[package.extras]
|
||||
testing = ["pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "pkgutil-resolve-name"
|
||||
version = "1.3.10"
|
||||
description = "Resolve a name to an object."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
|
||||
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prometheus-client"
|
||||
version = "0.21.0"
|
||||
|
@ -1803,13 +1784,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
|||
|
||||
[[package]]
|
||||
name = "pygithub"
|
||||
version = "2.4.0"
|
||||
version = "2.5.0"
|
||||
description = "Use the full Github API v3"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"},
|
||||
{file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"},
|
||||
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
|
||||
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
@ -1948,7 +1929,6 @@ files = [
|
|||
[package.dependencies]
|
||||
cryptography = ">=3.1"
|
||||
defusedxml = "*"
|
||||
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
|
||||
pyopenssl = "*"
|
||||
python-dateutil = "*"
|
||||
pytz = "*"
|
||||
|
@ -2164,7 +2144,6 @@ files = [
|
|||
[package.dependencies]
|
||||
markdown-it-py = ">=2.2.0,<3.0.0"
|
||||
pygments = ">=2.13.0,<3.0.0"
|
||||
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
|
||||
|
||||
[package.extras]
|
||||
jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
||||
|
@ -2277,29 +2256,29 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
description = "An extremely fast Python linter and code formatter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "ruff-0.7.2-py3-none-linux_armv6l.whl", hash = "sha256:b73f873b5f52092e63ed540adefc3c36f1f803790ecf2590e1df8bf0a9f72cb8"},
|
||||
{file = "ruff-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5b813ef26db1015953daf476202585512afd6a6862a02cde63f3bafb53d0b2d4"},
|
||||
{file = "ruff-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:853277dbd9675810c6826dad7a428d52a11760744508340e66bf46f8be9701d9"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21aae53ab1490a52bf4e3bf520c10ce120987b047c494cacf4edad0ba0888da2"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc7e0fc6e0cb3168443eeadb6445285abaae75142ee22b2b72c27d790ab60ba"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd77877a4e43b3a98e5ef4715ba3862105e299af0c48942cc6d51ba3d97dc859"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e00163fb897d35523c70d71a46fbaa43bf7bf9af0f4534c53ea5b96b2e03397b"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3c54b538633482dc342e9b634d91168fe8cc56b30a4b4f99287f4e339103e88"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b792468e9804a204be221b14257566669d1db5c00d6bb335996e5cd7004ba80"},
|
||||
{file = "ruff-0.7.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dba53ed84ac19ae4bfb4ea4bf0172550a2285fa27fbb13e3746f04c80f7fa088"},
|
||||
{file = "ruff-0.7.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b19fafe261bf741bca2764c14cbb4ee1819b67adb63ebc2db6401dcd652e3748"},
|
||||
{file = "ruff-0.7.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:28bd8220f4d8f79d590db9e2f6a0674f75ddbc3847277dd44ac1f8d30684b828"},
|
||||
{file = "ruff-0.7.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9fd67094e77efbea932e62b5d2483006154794040abb3a5072e659096415ae1e"},
|
||||
{file = "ruff-0.7.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:576305393998b7bd6c46018f8104ea3a9cb3fa7908c21d8580e3274a3b04b691"},
|
||||
{file = "ruff-0.7.2-py3-none-win32.whl", hash = "sha256:fa993cfc9f0ff11187e82de874dfc3611df80852540331bc85c75809c93253a8"},
|
||||
{file = "ruff-0.7.2-py3-none-win_amd64.whl", hash = "sha256:dd8800cbe0254e06b8fec585e97554047fb82c894973f7ff18558eee33d1cb88"},
|
||||
{file = "ruff-0.7.2-py3-none-win_arm64.whl", hash = "sha256:bb8368cd45bba3f57bb29cbb8d64b4a33f8415d0149d2655c5c8539452ce7760"},
|
||||
{file = "ruff-0.7.2.tar.gz", hash = "sha256:2b14e77293380e475b4e3a7a368e14549288ed2931fce259a6f99978669e844f"},
|
||||
{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
|
||||
{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
|
||||
{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
|
||||
{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
|
||||
{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
|
||||
{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
|
||||
{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -2536,33 +2515,33 @@ twisted = ["twisted"]
|
|||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.0.2"
|
||||
version = "2.1.0"
|
||||
description = "A lil' TOML parser"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
|
||||
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
|
||||
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
|
||||
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tornado"
|
||||
version = "6.4.1"
|
||||
version = "6.4.2"
|
||||
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
|
||||
{file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"},
|
||||
{file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -3121,5 +3100,5 @@ user-search = ["pyicu"]
|
|||
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.8.0"
|
||||
content-hash = "eaded26b4770b9d19bfcee6dee8b96203df358ce51939d9b90fdbcf605e2f5fd"
|
||||
python-versions = "^3.9.0"
|
||||
content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"
|
||||
|
|
|
@ -36,7 +36,7 @@
|
|||
|
||||
[tool.ruff]
|
||||
line-length = 88
|
||||
target-version = "py38"
|
||||
target-version = "py39"
|
||||
|
||||
[tool.ruff.lint]
|
||||
# See https://beta.ruff.rs/docs/rules/#error-e
|
||||
|
@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
|
|||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.118.0"
|
||||
version = "1.120.0rc1"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "AGPL-3.0-or-later"
|
||||
|
@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
|
|||
update_synapse_database = "synapse._scripts.update_synapse_database:main"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.8.0"
|
||||
python = "^3.9.0"
|
||||
|
||||
# Mandatory Dependencies
|
||||
# ----------------------
|
||||
|
@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
|
|||
treq = ">=15.1"
|
||||
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
|
||||
pyOpenSSL = ">=16.0.0"
|
||||
PyYAML = ">=3.13"
|
||||
PyYAML = ">=5.3"
|
||||
pyasn1 = ">=0.1.9"
|
||||
pyasn1-modules = ">=0.0.7"
|
||||
bcrypt = ">=3.1.7"
|
||||
|
@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
|
|||
# `contrib/systemd/log_config.yaml`.
|
||||
# Note: systemd-python 231 appears to have been yanked from pypi
|
||||
systemd-python = { version = ">=231", optional = true }
|
||||
lxml = { version = ">=4.2.0", optional = true }
|
||||
lxml = { version = ">=4.5.2", optional = true }
|
||||
sentry-sdk = { version = ">=0.7.2", optional = true }
|
||||
opentracing = { version = ">=2.2.0", optional = true }
|
||||
jaeger-client = { version = ">=4.0.0", optional = true }
|
||||
|
@ -320,7 +320,7 @@ all = [
|
|||
# failing on new releases. Keeping lower bounds loose here means that dependabot
|
||||
# can bump versions without having to update the content-hash in the lockfile.
|
||||
# This helps prevent merge conflicts when running a batch of dependabot updates.
|
||||
ruff = "0.7.2"
|
||||
ruff = "0.7.3"
|
||||
# Type checking only works with the pydantic.v1 compat module from pydantic v2
|
||||
pydantic = "^2"
|
||||
|
||||
|
@ -370,7 +370,7 @@ tomli = ">=1.2.3"
|
|||
# runtime errors caused by build system changes.
|
||||
# We are happy to raise these upper bounds upon request,
|
||||
# provided we check that it's safe to do so (i.e. that CI passes).
|
||||
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
|
||||
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
|
||||
|
@ -378,13 +378,13 @@ build-backend = "poetry.core.masonry.api"
|
|||
# Skip unsupported platforms (by us or by Rust).
|
||||
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
|
||||
# We skip:
|
||||
# - CPython 3.6 and 3.7: EOLed
|
||||
# - PyPy 3.7: we only support Python 3.8+
|
||||
# - CPython 3.6, 3.7 and 3.8: EOLed
|
||||
# - PyPy 3.7 and 3.8: we only support Python 3.9+
|
||||
# - musllinux i686: excluded to reduce number of wheels we build.
|
||||
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
|
||||
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
|
||||
# c.f. https://github.com/matrix-org/synapse/pull/14259
|
||||
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
|
||||
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
|
||||
|
||||
# We need a rust compiler
|
||||
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
|
||||
|
|
107
rust/src/events/filter.rs
Normal file
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use pyo3::{exceptions::PyValueError, pyfunction, PyResult};
|
||||
|
||||
use crate::{
|
||||
identifier::UserID,
|
||||
matrix_const::{
|
||||
HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN,
|
||||
},
|
||||
};
|
||||
|
||||
#[pyfunction(name = "event_visible_to_server")]
|
||||
pub fn event_visible_to_server_py(
|
||||
sender: String,
|
||||
target_server_name: String,
|
||||
history_visibility: String,
|
||||
erased_senders: HashMap<String, bool>,
|
||||
partial_state_invisible: bool,
|
||||
memberships: Vec<(String, String)>, // (state_key, membership)
|
||||
) -> PyResult<bool> {
|
||||
event_visible_to_server(
|
||||
sender,
|
||||
target_server_name,
|
||||
history_visibility,
|
||||
erased_senders,
|
||||
partial_state_invisible,
|
||||
memberships,
|
||||
)
|
||||
.map_err(|e| PyValueError::new_err(format!("{e}")))
|
||||
}
|
||||
|
||||
/// Return whether the target server is allowed to see the event.
|
||||
///
|
||||
/// For a fully stated room, the target server is allowed to see an event E if:
|
||||
/// - the state at E has world readable or shared history vis, OR
|
||||
/// - the state at E says that the target server is in the room.
|
||||
///
|
||||
/// For a partially stated room, the target server is allowed to see E if:
|
||||
/// - E was created by this homeserver, AND:
|
||||
/// - the partial state at E has world readable or shared history vis, OR
|
||||
/// - the partial state at E says that the target server is in the room.
|
||||
pub fn event_visible_to_server(
|
||||
sender: String,
|
||||
target_server_name: String,
|
||||
history_visibility: String,
|
||||
erased_senders: HashMap<String, bool>,
|
||||
partial_state_invisible: bool,
|
||||
memberships: Vec<(String, String)>, // (state_key, membership)
|
||||
) -> anyhow::Result<bool> {
|
||||
if let Some(&erased) = erased_senders.get(&sender) {
|
||||
if erased {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
if partial_state_invisible {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
if history_visibility != HISTORY_VISIBILITY_INVITED
|
||||
&& history_visibility != HISTORY_VISIBILITY_JOINED
|
||||
{
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let mut visible = false;
|
||||
for (state_key, membership) in memberships {
|
||||
let state_key = UserID::try_from(state_key.as_ref())
|
||||
.map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?;
|
||||
if state_key.server_name() != target_server_name {
|
||||
return Err(anyhow::anyhow!(
|
||||
"state_key.server_name ({}) does not match target_server_name ({target_server_name})",
|
||||
state_key.server_name()
|
||||
));
|
||||
}
|
||||
|
||||
match membership.as_str() {
|
||||
MEMBERSHIP_INVITE => {
|
||||
if history_visibility == HISTORY_VISIBILITY_INVITED {
|
||||
visible = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
MEMBERSHIP_JOIN => {
|
||||
visible = true;
|
||||
break;
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(visible)
|
||||
}
|
|
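For illustration, here is a minimal pure-Python sketch of the visibility rule that the new `event_visible_to_server` function implements (the standalone helper below is hypothetical and exists only to restate the logic; the real implementation is the Rust function above, exposed to Python via the `synapse.synapse_rust` events module):

```python
from typing import Dict, List, Tuple


def event_visible_to_server_sketch(
    sender: str,
    target_server_name: str,
    history_visibility: str,
    erased_senders: Dict[str, bool],
    partial_state_invisible: bool,
    memberships: List[Tuple[str, str]],  # (state_key, membership)
) -> bool:
    """Pure-Python restatement of rust/src/events/filter.rs (illustrative only)."""
    # Events from erased senders are never shared with other servers.
    if erased_senders.get(sender):
        return False
    # In a partially stated room, only events we are certain about are shared.
    if partial_state_invisible:
        return False
    # "world_readable" and "shared" history visibility are visible to everyone.
    if history_visibility not in ("invited", "joined"):
        return True
    # Otherwise the target server needs a joined member (or an invited one,
    # when history visibility is "invited"). Each state_key is expected to be
    # a user on target_server_name.
    for _state_key, membership in memberships:
        if membership == "join":
            return True
        if membership == "invite" and history_visibility == "invited":
            return True
    return False
```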
@ -22,15 +22,17 @@
|
|||
|
||||
use pyo3::{
|
||||
types::{PyAnyMethods, PyModule, PyModuleMethods},
|
||||
Bound, PyResult, Python,
|
||||
wrap_pyfunction, Bound, PyResult, Python,
|
||||
};
|
||||
|
||||
pub mod filter;
|
||||
mod internal_metadata;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "events")?;
|
||||
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
|
||||
child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?;
|
||||
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
|
|
86
rust/src/identifier.rs
Normal file
|
@ -0,0 +1,86 @@
|
|||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
//! # Matrix Identifiers
|
||||
//!
|
||||
//! This module contains definitions and utilities for working with matrix identifiers.
|
||||
|
||||
use std::{fmt, ops::Deref};
|
||||
|
||||
/// Errors that can occur when parsing a matrix identifier.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum IdentifierError {
|
||||
IncorrectSigil,
|
||||
MissingColon,
|
||||
}
|
||||
|
||||
impl fmt::Display for IdentifierError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{:?}", self)
|
||||
}
|
||||
}
|
||||
|
||||
/// A Matrix user_id.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct UserID(String);
|
||||
|
||||
impl UserID {
|
||||
/// Returns the `localpart` of the user_id.
|
||||
pub fn localpart(&self) -> &str {
|
||||
&self[1..self.colon_pos()]
|
||||
}
|
||||
|
||||
/// Returns the `server_name` / `domain` of the user_id.
|
||||
pub fn server_name(&self) -> &str {
|
||||
&self[self.colon_pos() + 1..]
|
||||
}
|
||||
|
||||
/// Returns the position of the ':' inside of the user_id.
|
||||
/// Used when splitting the user_id into its respective parts.
|
||||
fn colon_pos(&self) -> usize {
|
||||
self.find(':').unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for UserID {
|
||||
type Error = IdentifierError;
|
||||
|
||||
/// Will try creating a `UserID` from the provided `&str`.
|
||||
/// Can fail if the user_id is incorrectly formatted.
|
||||
fn try_from(s: &str) -> Result<Self, Self::Error> {
|
||||
if !s.starts_with('@') {
|
||||
return Err(IdentifierError::IncorrectSigil);
|
||||
}
|
||||
|
||||
if s.find(':').is_none() {
|
||||
return Err(IdentifierError::MissingColon);
|
||||
}
|
||||
|
||||
Ok(UserID(s.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for UserID {
|
||||
type Target = str;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for UserID {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
|
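The parsing rules for `UserID` are compact enough to restate in Python; a minimal sketch (the helper name is hypothetical) of what the `TryFrom<&str>` implementation above accepts and how `localpart`/`server_name` are derived:

```python
def split_user_id(user_id: str) -> tuple[str, str]:
    """Split '@localpart:server.name' into (localpart, server_name).

    Mirrors rust/src/identifier.rs: the sigil must be '@', and the server
    name is everything after the *first* colon.
    """
    if not user_id.startswith("@"):
        raise ValueError("incorrect sigil")
    localpart, sep, server_name = user_id[1:].partition(":")
    if not sep:
        raise ValueError("missing colon")
    return localpart, server_name


assert split_user_id("@alice:example.com") == ("alice", "example.com")
```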
@ -6,6 +6,8 @@ pub mod acl;
|
|||
pub mod errors;
|
||||
pub mod events;
|
||||
pub mod http;
|
||||
pub mod identifier;
|
||||
pub mod matrix_const;
|
||||
pub mod push;
|
||||
pub mod rendezvous;
|
||||
|
||||
|
|
28
rust/src/matrix_const.rs
Normal file
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
//! # Matrix Constants
|
||||
//!
|
||||
//! This module contains definitions for constant values described by the matrix specification.
|
||||
|
||||
pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable";
|
||||
pub const HISTORY_VISIBILITY_SHARED: &str = "shared";
|
||||
pub const HISTORY_VISIBILITY_INVITED: &str = "invited";
|
||||
pub const HISTORY_VISIBILITY_JOINED: &str = "joined";
|
||||
|
||||
pub const MEMBERSHIP_BAN: &str = "ban";
|
||||
pub const MEMBERSHIP_LEAVE: &str = "leave";
|
||||
pub const MEMBERSHIP_KNOCK: &str = "knock";
|
||||
pub const MEMBERSHIP_INVITE: &str = "invite";
|
||||
pub const MEMBERSHIP_JOIN: &str = "join";
|
|
@ -23,7 +23,6 @@ use anyhow::bail;
|
|||
use anyhow::Context;
|
||||
use anyhow::Error;
|
||||
use lazy_static::lazy_static;
|
||||
use regex;
|
||||
use regex::Regex;
|
||||
use regex::RegexBuilder;
|
||||
|
||||
|
|
|
@ -1233,9 +1233,9 @@
|
|||
},
|
||||
"enable_authenticated_media": {
|
||||
"type": "boolean",
|
||||
"description": "When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false after enabling, media marked as authenticated will be available over legacy endpoints. Will default to true in a future Synapse release.",
|
||||
"default": false,
|
||||
"examples": [true]
|
||||
"description": "When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.\n\nIn all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this case-by-case breakdown describes whether media downloads are permitted:\n\n* `enable_authenticated_media = False`:\n * unauthenticated client or homeserver requesting local media: allowed\n * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, or as long as the remote homeserver does not require authentication to retrieve the media\n* `enable_authenticated_media = True`:\n * unauthenticated client or homeserver requesting local media: allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.\n * unauthenticated client or homeserver requesting remote media: the same as for local media; allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.\n\nIt is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`, will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`. This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media; those older clients or homeservers will not be cut off from media they can already see.\n\n_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.",
|
||||
"default": true,
|
||||
"examples": [false]
|
||||
},
|
||||
"enable_media_repo": {
|
||||
"type": "boolean",
|
||||
|
@ -1897,7 +1897,7 @@
|
|||
},
|
||||
"old_signing_keys": {
|
||||
"type": "object",
|
||||
"description": "The keys that the server used to sign messages with but won't use to sign new messages.\n\nIt is possible to build an entry from an old `signing.key` file using the `export_signing_key` script which is provided with synapse.",
|
||||
"description": "The keys that the server used to sign messages with but won't use to sign new messages.\n\nIt is possible to build an entry from an old `signing.key` file using the `export_signing_key` script which is provided with synapse.\n\nIf you have lost the private key file, you can ask another server you trust to tell you the public keys it has seen from your server. To fetch the keys from `matrix.org`, try something like:\n\n```\ncurl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |\n jq '.server_keys | map(.verify_keys) | add'\n```",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
|
@ -2084,11 +2084,37 @@ properties:
|
|||
`_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this
|
||||
option to true will still be available over the legacy endpoints. Note if
|
||||
the setting is switched to false after enabling, media marked as
|
||||
authenticated will be available over legacy endpoints. Will default to
|
||||
true in a future Synapse release.
|
||||
default: false
|
||||
authenticated will be available over legacy endpoints. Defaults to true
|
||||
(previously false). In a future release of Synapse, this option will be
|
||||
removed and become always-on.
|
||||
|
||||
|
||||
In all cases, authenticated requests to download media will succeed, but
|
||||
for unauthenticated requests, this case-by-case breakdown describes
|
||||
whether media downloads are permitted:
|
||||
|
||||
|
||||
* `enable_authenticated_media = False`:
|
||||
* unauthenticated client or homeserver requesting local media: allowed
|
||||
* unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, or as long as the remote homeserver does not require authentication to retrieve the media
|
||||
* `enable_authenticated_media = True`:
|
||||
* unauthenticated client or homeserver requesting local media: allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
|
||||
* unauthenticated client or homeserver requesting remote media: the same as for local media; allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
|
||||
|
||||
It is especially notable that media downloaded before this option existed
|
||||
(in older Synapse versions), or whilst this option was set to `False`,
|
||||
will perpetually be available over the legacy, unauthenticated endpoint,
|
||||
even after this option is set to `True`. This is for backwards
|
||||
compatibility with older clients and homeservers that do not yet support
|
||||
requesting authenticated media; those older clients or homeservers will
|
||||
not be cut off from media they can already see.
|
||||
|
||||
|
||||
_Changed in Synapse 1.120:_ This option now defaults to `True` when not
|
||||
set, whereas before this version it defaulted to `False`.
|
||||
default: true
|
||||
examples:
|
||||
- true
|
||||
- false
|
||||
enable_media_repo:
|
||||
type: boolean
|
||||
description: >-
|
||||
|
@ -3323,6 +3349,18 @@ properties:
|
|||
|
||||
It is possible to build an entry from an old `signing.key` file using the
|
||||
`export_signing_key` script which is provided with synapse.
|
||||
|
||||
|
||||
If you have lost the private key file, you can ask another server you
|
||||
trust to tell you the public keys it has seen from your server. To fetch
|
||||
the keys from `matrix.org`, try something like:
|
||||
|
||||
|
||||
```
|
||||
|
||||
curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
|
||||
jq '.server_keys | map(.verify_keys) | add'
|
||||
```
|
||||
additionalProperties:
|
||||
type: object
|
||||
properties:
|
||||
|
|
|
@ -28,9 +28,8 @@ from typing import Collection, Optional, Sequence, Set
|
|||
# example)
|
||||
DISTS = (
|
||||
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
|
||||
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
|
||||
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"debian:sid", # (rolling distro, no EOL)
|
||||
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
|
||||
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
|
||||
"ubuntu:oracular", # 24.10 (EOL 2025-07)
|
||||
|
|
|
@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
|
|||
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
|
||||
# if-statement completely.
|
||||
py_version = sys.version_info
|
||||
if py_version < (3, 8):
|
||||
print("Synapse requires Python 3.8 or above.")
|
||||
if py_version < (3, 9):
|
||||
print("Synapse requires Python 3.9 or above.")
|
||||
sys.exit(1)
|
||||
|
||||
# Allow using the asyncio reactor via env var.
|
||||
|
|
|
@ -88,6 +88,7 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore
|
|||
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
|
||||
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.user_directory import (
|
||||
|
@ -255,6 +256,7 @@ class Store(
|
|||
ReceiptsBackgroundUpdateStore,
|
||||
RelationsWorkerStore,
|
||||
EventFederationWorkerStore,
|
||||
SlidingSyncStore,
|
||||
):
|
||||
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
|
||||
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
|
||||
|
|
|
@ -365,11 +365,6 @@ class ExperimentalConfig(Config):
|
|||
# MSC3874: Filtering /messages with rel_types / not_rel_types.
|
||||
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
|
||||
|
||||
# MSC3886: Simple client rendezvous capability
|
||||
self.msc3886_endpoint: Optional[str] = experimental.get(
|
||||
"msc3886_endpoint", None
|
||||
)
|
||||
|
||||
# MSC3890: Remotely silence local notifications
|
||||
# Note: This option requires "experimental_features.msc3391_enabled" to be
|
||||
# set to "true", in order to communicate account data deletions to clients.
|
||||
|
@ -450,3 +445,6 @@ class ExperimentalConfig(Config):
|
|||
|
||||
# MSC4210: Remove legacy mentions
|
||||
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
|
||||
|
||||
# MSC4222: Adding `state_after` to sync v2
|
||||
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
|
||||
|
|
|
@ -272,9 +272,7 @@ class ContentRepositoryConfig(Config):
|
|||
remote_media_lifetime
|
||||
)
|
||||
|
||||
self.enable_authenticated_media = config.get(
|
||||
"enable_authenticated_media", False
|
||||
)
|
||||
self.enable_authenticated_media = config.get("enable_authenticated_media", True)
|
||||
|
||||
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
|
||||
assert data_dir_path is not None
|
||||
|
|
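In practice this flips the out-of-the-box behaviour: a deployment that still needs newly uploaded media to be reachable over the legacy unauthenticated endpoints now has to opt out explicitly. A minimal, illustrative `homeserver.yaml` excerpt (assuming the option name is used exactly as above):

```yaml
# Restore the pre-1.120 default: do not mark new uploads as authenticated,
# so they stay available over /_matrix/media/(r0|v3|v1)/download|thumbnail.
enable_authenticated_media: false
```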
|
@ -215,9 +215,6 @@ class HttpListenerConfig:
|
|||
additional_resources: Dict[str, dict] = attr.Factory(dict)
|
||||
tag: Optional[str] = None
|
||||
request_id_header: Optional[str] = None
|
||||
# If true, the listener will return CORS response headers compatible with MSC3886:
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/3886
|
||||
experimental_cors_msc3886: bool = False
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
|
@ -1004,7 +1001,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
|
|||
additional_resources=listener.get("additional_resources", {}),
|
||||
tag=listener.get("tag"),
|
||||
request_id_header=listener.get("request_id_header"),
|
||||
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
|
||||
)
|
||||
|
||||
if socket_path:
|
||||
|
|
|
@ -140,7 +140,6 @@ from typing import (
|
|||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
|
@ -170,7 +169,13 @@ from synapse.metrics.background_process_metrics import (
|
|||
run_as_background_process,
|
||||
wrap_as_background_process,
|
||||
)
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
|
||||
from synapse.types import (
|
||||
JsonDict,
|
||||
ReadReceipt,
|
||||
RoomStreamToken,
|
||||
StrCollection,
|
||||
get_domain_from_id,
|
||||
)
|
||||
from synapse.util import Clock
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.retryutils import filter_destinations_by_retry_limiter
|
||||
|
@ -297,12 +302,10 @@ class _DestinationWakeupQueue:
|
|||
# being woken up.
|
||||
_MAX_TIME_IN_QUEUE = 30.0
|
||||
|
||||
# The maximum duration in seconds between waking up consecutive destination
|
||||
# queues.
|
||||
_MAX_DELAY = 0.1
|
||||
|
||||
sender: "FederationSender" = attr.ib()
|
||||
clock: Clock = attr.ib()
|
||||
max_delay_s: int = attr.ib()
|
||||
|
||||
queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
|
||||
processing: bool = attr.ib(default=False)
|
||||
|
||||
|
@ -332,7 +335,7 @@ class _DestinationWakeupQueue:
|
|||
# We also add an upper bound to the delay, to gracefully handle the
|
||||
# case where the queue only has a few entries in it.
|
||||
current_sleep_seconds = min(
|
||||
self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
|
||||
self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue)
|
||||
)
|
||||
|
||||
while self.queue:
|
||||
|
@ -416,19 +419,14 @@ class FederationSender(AbstractFederationSender):
|
|||
self._is_processing = False
|
||||
self._last_poked_id = -1
|
||||
|
||||
# map from room_id to a set of PerDestinationQueues which we believe are
|
||||
# awaiting a call to flush_read_receipts_for_room. The presence of an entry
|
||||
# here for a given room means that we are rate-limiting RR flushes to that room,
|
||||
# and that there is a pending call to _flush_rrs_for_room in the system.
|
||||
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
|
||||
|
||||
self._rr_txn_interval_per_room_ms = (
|
||||
1000.0
|
||||
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
|
||||
)
|
||||
|
||||
self._external_cache = hs.get_external_cache()
|
||||
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
|
||||
|
||||
rr_txn_interval_per_room_s = (
|
||||
1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
|
||||
)
|
||||
self._destination_wakeup_queue = _DestinationWakeupQueue(
|
||||
self, self.clock, max_delay_s=rr_txn_interval_per_room_s
|
||||
)
|
||||
|
||||
# Regularly wake up destinations that have outstanding PDUs to be caught up
|
||||
self.clock.looping_call_now(
|
||||
|
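As a rough sanity check on the numbers above: assuming the default `federation_rr_transactions_per_room_per_second` of 50, `rr_txn_interval_per_room_s` comes out at 1.0 / 50 = 0.02 s, which becomes the cap on how long the wakeup queue waits between waking consecutive delayed destinations.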
@ -745,37 +743,48 @@ class FederationSender(AbstractFederationSender):
|
|||
|
||||
# Some background on the rate-limiting going on here.
|
||||
#
|
||||
# It turns out that if we attempt to send out RRs as soon as we get them from
|
||||
# a client, then we end up trying to do several hundred Hz of federation
|
||||
# transactions. (The number of transactions scales as O(N^2) on the size of a
|
||||
# room, since in a large room we have both more RRs coming in, and more servers
|
||||
# to send them to.)
|
||||
# It turns out that if we attempt to send out RRs as soon as we get them
|
||||
# from a client, then we end up trying to do several hundred Hz of
|
||||
# federation transactions. (The number of transactions scales as O(N^2)
|
||||
# on the size of a room, since in a large room we have both more RRs
|
||||
# coming in, and more servers to send them to.)
|
||||
#
|
||||
# This leads to a lot of CPU load, and we end up getting behind. The solution
|
||||
# currently adopted is as follows:
|
||||
# This leads to a lot of CPU load, and we end up getting behind. The
|
||||
# solution currently adopted is to differentiate between receipts and
|
||||
# destinations we should immediately send to, and those we can trickle
|
||||
# the receipts to.
|
||||
#
|
||||
# The first receipt in a given room is sent out immediately, at time T0. Any
|
||||
# further receipts are, in theory, batched up for N seconds, where N is calculated
|
||||
# based on the number of servers in the room to achieve a transaction frequency
|
||||
# of around 50Hz. So, for example, if there were 100 servers in the room, then
|
||||
# N would be 100 / 50Hz = 2 seconds.
|
||||
# The current logic is to send receipts out immediately if:
|
||||
# - the room is "small", i.e. there's only N servers to send receipts
|
||||
# to, and so sending out the receipts immediately doesn't cause too
|
||||
# much load; or
|
||||
# - the receipt is for an event that happened recently, as users
|
||||
# notice if receipts are delayed when they know other users are
|
||||
# currently reading the room; or
|
||||
# - the receipt is being sent to the server that sent the event, so
|
||||
# that users see receipts for their own receipts quickly.
|
||||
#
|
||||
# Then, after T+N, we flush out any receipts that have accumulated, and restart
|
||||
# the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
|
||||
# we stop the cycle and go back to the start.
|
||||
# For destinations that we should delay sending the receipt to, we queue
|
||||
# the receipts up to be sent in the next transaction, but don't trigger
|
||||
# a new transaction to be sent. We then add the destination to the
|
||||
# `DestinationWakeupQueue`, which will slowly iterate over each
|
||||
# destination and trigger a new transaction to be sent.
|
||||
#
|
||||
# However, in practice, it is often possible to flush out receipts earlier: in
|
||||
# particular, if we are sending a transaction to a given server anyway (for
|
||||
# example, because we have a PDU or a RR in another room to send), then we may
|
||||
# as well send out all of the pending RRs for that server. So it may be that
|
||||
# by the time we get to T+N, we don't actually have any RRs left to send out.
|
||||
# Nevertheless we continue to buffer up RRs for the room in question until we
|
||||
# reach the point that no RRs arrive between timer ticks.
|
||||
# However, in practice, it is often possible to send out delayed
|
||||
# receipts earlier: in particular, if we are sending a transaction to a
|
||||
# given server anyway (for example, because we have a PDU or a RR in
|
||||
# another room to send), then we may as well send out all of the pending
|
||||
# RRs for that server. So it may be that by the time we get to waking up
|
||||
# the destination, we don't actually have any RRs left to send out.
|
||||
#
|
||||
# For even more background, see https://github.com/matrix-org/synapse/issues/4730.
|
||||
# For even more background, see
|
||||
# https://github.com/matrix-org/synapse/issues/4730.
|
||||
|
||||
room_id = receipt.room_id
|
||||
|
||||
# Local read receipts always have 1 event ID.
|
||||
event_id = receipt.event_ids[0]
|
||||
|
||||
# Work out which remote servers should be poked and poke them.
|
||||
domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
|
||||
room_id
|
||||
|
@ -797,49 +806,51 @@ class FederationSender(AbstractFederationSender):
|
|||
if not domains:
|
||||
return
|
||||
|
||||
queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
|
||||
# We now split which domains we want to wake up immediately vs which we
|
||||
# want to delay waking up.
|
||||
immediate_domains: StrCollection
|
||||
delay_domains: StrCollection
|
||||
|
||||
# if there is no flush yet scheduled, we will send out these receipts with
|
||||
# immediate flushes, and schedule the next flush for this room.
|
||||
if queues_pending_flush is not None:
|
||||
logger.debug("Queuing receipt for: %r", domains)
|
||||
if len(domains) < 10:
|
||||
# For "small" rooms send to all domains immediately
|
||||
immediate_domains = domains
|
||||
delay_domains = ()
|
||||
else:
|
||||
logger.debug("Sending receipt to: %r", domains)
|
||||
self._schedule_rr_flush_for_room(room_id, len(domains))
|
||||
metadata = await self.store.get_metadata_for_event(
|
||||
receipt.room_id, event_id
|
||||
)
|
||||
assert metadata is not None
|
||||
|
||||
for domain in domains:
|
||||
sender_domain = get_domain_from_id(metadata.sender)
|
||||
|
||||
if self.clock.time_msec() - metadata.received_ts < 60_000:
|
||||
# We always send receipts for recent messages immediately
|
||||
immediate_domains = domains
|
||||
delay_domains = ()
|
||||
else:
|
||||
# Otherwise, we delay waking up all destinations except for the
|
||||
# sender's domain.
|
||||
immediate_domains = []
|
||||
delay_domains = []
|
||||
for domain in domains:
|
||||
if domain == sender_domain:
|
||||
immediate_domains.append(domain)
|
||||
else:
|
||||
delay_domains.append(domain)
|
||||
|
||||
for domain in immediate_domains:
|
||||
# Add to destination queue and wake the destination up
|
||||
queue = self._get_per_destination_queue(domain)
|
||||
queue.queue_read_receipt(receipt)
|
||||
queue.attempt_new_transaction()
|
||||
|
||||
for domain in delay_domains:
|
||||
# Add to destination queue...
|
||||
queue = self._get_per_destination_queue(domain)
|
||||
queue.queue_read_receipt(receipt)
|
||||
|
||||
# if there is already a RR flush pending for this room, then make sure this
|
||||
# destination is registered for the flush
|
||||
if queues_pending_flush is not None:
|
||||
queues_pending_flush.add(queue)
|
||||
else:
|
||||
queue.flush_read_receipts_for_room(room_id)
|
||||
|
||||
def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
|
||||
# that is going to cause approximately len(domains) transactions, so now back
|
||||
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
|
||||
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
|
||||
|
||||
logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
|
||||
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
|
||||
self._queues_awaiting_rr_flush_by_room[room_id] = set()
|
||||
|
||||
def _flush_rrs_for_room(self, room_id: str) -> None:
|
||||
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
|
||||
logger.debug("Flushing RRs in %s to %s", room_id, queues)
|
||||
|
||||
if not queues:
|
||||
# no more RRs arrived for this room; we are done.
|
||||
return
|
||||
|
||||
# schedule the next flush
|
||||
self._schedule_rr_flush_for_room(room_id, len(queues))
|
||||
|
||||
for queue in queues:
|
||||
queue.flush_read_receipts_for_room(room_id)
|
||||
# ... and schedule the destination to be woken up.
|
||||
self._destination_wakeup_queue.add_to_queue(domain)
|
||||
|
||||
async def send_presence_to_destinations(
|
||||
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
|
||||
|
|
|
@ -156,7 +156,6 @@ class PerDestinationQueue:
|
|||
# Each receipt can only have a single receipt per
|
||||
# (room ID, receipt type, user ID, thread ID) tuple.
|
||||
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
|
||||
self._rrs_pending_flush = False
|
||||
|
||||
# stream_id of last successfully sent to-device message.
|
||||
# NB: may be a long or an int.
|
||||
|
@ -258,15 +257,7 @@ class PerDestinationQueue:
|
|||
}
|
||||
)
|
||||
|
||||
def flush_read_receipts_for_room(self, room_id: str) -> None:
|
||||
# If there are any pending receipts for this room then force-flush them
|
||||
# in a new transaction.
|
||||
for edu in self._pending_receipt_edus:
|
||||
if room_id in edu:
|
||||
self._rrs_pending_flush = True
|
||||
self.attempt_new_transaction()
|
||||
# No use in checking remaining EDUs if the room was found.
|
||||
break
|
||||
self.mark_new_data()
|
||||
|
||||
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
|
||||
self._pending_edus_keyed[(edu.edu_type, key)] = edu
|
||||
|
@ -603,12 +594,9 @@ class PerDestinationQueue:
|
|||
self._destination, last_successful_stream_ordering
|
||||
)
|
||||
|
||||
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
|
||||
def _get_receipt_edus(self, limit: int) -> Iterable[Edu]:
|
||||
if not self._pending_receipt_edus:
|
||||
return
|
||||
if not force_flush and not self._rrs_pending_flush:
|
||||
# not yet time for this lot
|
||||
return
|
||||
|
||||
# Send at most limit EDUs for receipts.
|
||||
for content in self._pending_receipt_edus[:limit]:
|
||||
|
@ -747,7 +735,7 @@ class _TransactionQueueManager:
|
|||
)
|
||||
|
||||
# Add read receipt EDUs.
|
||||
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
|
||||
pending_edus.extend(self.queue._get_receipt_edus(limit=5))
|
||||
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
|
||||
|
||||
# Next, prioritize to-device messages so that existing encryption channels
|
||||
|
@ -795,13 +783,6 @@ class _TransactionQueueManager:
|
|||
if not self._pdus and not pending_edus:
|
||||
return [], []
|
||||
|
||||
# if we've decided to send a transaction anyway, and we have room, we
|
||||
# may as well send any pending RRs
|
||||
if edu_limit:
|
||||
pending_edus.extend(
|
||||
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
|
||||
)
|
||||
|
||||
if self._pdus:
|
||||
self._last_stream_ordering = self._pdus[
|
||||
-1
|
||||
|
|
|
@ -124,6 +124,7 @@ class AdminHandler:
|
|||
"consent_ts": user_info.consent_ts,
|
||||
"user_type": user_info.user_type,
|
||||
"is_guest": user_info.is_guest,
|
||||
"suspended": user_info.suspended,
|
||||
}
|
||||
|
||||
if self._msc3866_enabled:
|
||||
|
|
|
@ -39,6 +39,8 @@ from synapse.replication.http.devices import ReplicationUploadKeysForUserRestSer
|
|||
from synapse.types import (
|
||||
JsonDict,
|
||||
JsonMapping,
|
||||
ScheduledTask,
|
||||
TaskStatus,
|
||||
UserID,
|
||||
get_domain_from_id,
|
||||
get_verify_key_from_cross_signing_key,
|
||||
|
@ -70,6 +72,7 @@ class E2eKeysHandler:
|
|||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
self._task_scheduler = hs.get_task_scheduler()
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
|
@ -116,6 +119,10 @@ class E2eKeysHandler:
|
|||
hs.config.experimental.msc3984_appservice_key_query
|
||||
)
|
||||
|
||||
self._task_scheduler.register_action(
|
||||
self._delete_old_one_time_keys_task, "delete_old_otks"
|
||||
)
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_devices(
|
||||
|
@ -615,7 +622,7 @@ class E2eKeysHandler:
|
|||
3. Attempt to fetch fallback keys from the database.
|
||||
|
||||
Args:
|
||||
local_query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
|
||||
always_include_fallback_keys: True to always include fallback keys.
|
||||
|
||||
Returns:
|
||||
|
@@ -1574,6 +1581,45 @@ class E2eKeysHandler:
            return True
        return False

    async def _delete_old_one_time_keys_task(
        self, task: ScheduledTask
    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
        """Scheduler task to delete old one time keys.

        Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
        that it could still have old OTKs that the client has dropped. This task is scheduled exactly once
        by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm.
        """
        last_user = task.result.get("from_user", "") if task.result else ""
        while True:
            # We process users in batches of 100
            users, rowcount = await self.store.delete_old_otks_for_next_user_batch(
                last_user, 100
            )
            if len(users) == 0:
                # We're done!
                return TaskStatus.COMPLETE, None, None

            logger.debug(
                "Deleted %i old one-time-keys for users '%s'..'%s'",
                rowcount,
                users[0],
                users[-1],
            )
            last_user = users[-1]

            # Store our progress
            await self._task_scheduler.update_task(
                task.id, result={"from_user": last_user}
            )

            # Sleep a little before doing the next batch of users.
            #
            # matrix.org has about 15M users in the e2e_one_time_keys_json table
            # (comprising 20M devices). We want this to take about a week, so we need
            # to do about one batch of 100 users every 4 seconds.
            await self.clock.sleep(4)


def _check_cross_signing_key(
    key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None

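Illustrative sketch only, not part of this changeset: the task above follows a resumable batch pattern (process a batch, checkpoint a cursor, sleep, repeat). The helper names fetch_batch and persist_cursor below are stand-ins rather than Synapse APIs, and the pacing value is arbitrary.

import asyncio
from typing import Awaitable, Callable, List, Tuple

async def run_resumable_batches(
    fetch_batch: Callable[[str, int], Awaitable[Tuple[List[str], int]]],
    persist_cursor: Callable[[str], Awaitable[None]],
    batch_size: int = 100,
    pause_seconds: float = 4.0,
    start_after: str = "",
) -> None:
    """Process items in batches, saving progress after each batch so the job
    can resume from where it left off if the process restarts."""
    cursor = start_after
    while True:
        items, _count = await fetch_batch(cursor, batch_size)
        if not items:
            return  # nothing left to do
        cursor = items[-1]
        await persist_cursor(cursor)  # checkpoint: a restart resumes from here
        await asyncio.sleep(pause_seconds)  # pace the work to limit DB load
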
@@ -196,7 +196,9 @@ class MessageHandler:
            AuthError (403) if the user doesn't have permission to view
                members of this room.
        """
        state_filter = state_filter or StateFilter.all()
        if state_filter is None:
            state_filter = StateFilter.all()

        user_id = requester.user.to_string()

        if at_token:

@@ -143,6 +143,7 @@ class SyncConfig:
    filter_collection: FilterCollection
    is_guest: bool
    device_id: Optional[str]
    use_state_after: bool


@attr.s(slots=True, frozen=True, auto_attribs=True)

@@ -1141,6 +1142,7 @@ class SyncHandler:
        since_token: Optional[StreamToken],
        end_token: StreamToken,
        full_state: bool,
        joined: bool,
    ) -> MutableStateMap[EventBase]:
        """Works out the difference in state between the end of the previous sync and
        the start of the timeline.

@@ -1155,6 +1157,7 @@ class SyncHandler:
                the point just after their leave event.
            full_state: Whether to force returning the full state.
                `lazy_load_members` still applies when `full_state` is `True`.
            joined: whether the user is currently joined to the room

        Returns:
            The state to return in the sync response for the room.

@@ -1230,11 +1233,12 @@ class SyncHandler:
        if full_state:
            state_ids = await self._compute_state_delta_for_full_sync(
                room_id,
                sync_config.user,
                sync_config,
                batch,
                end_token,
                members_to_fetch,
                timeline_state,
                joined,
            )
        else:
            # If this is an initial sync then full_state should be set, and

@@ -1244,6 +1248,7 @@ class SyncHandler:

            state_ids = await self._compute_state_delta_for_incremental_sync(
                room_id,
                sync_config,
                batch,
                since_token,
                end_token,

@@ -1316,20 +1321,24 @@ class SyncHandler:
    async def _compute_state_delta_for_full_sync(
        self,
        room_id: str,
        syncing_user: UserID,
        sync_config: SyncConfig,
        batch: TimelineBatch,
        end_token: StreamToken,
        members_to_fetch: Optional[Set[str]],
        timeline_state: StateMap[str],
        joined: bool,
    ) -> StateMap[str]:
        """Calculate the state events to be included in a full sync response.

        As with `_compute_state_delta_for_incremental_sync`, the result will include
        the membership events for the senders of each event in `members_to_fetch`.

        Note that whether this returns the state at the start or the end of the
        batch depends on `sync_config.use_state_after` (c.f. MSC4222).

        Args:
            room_id: The room we are calculating for.
            syncing_user: The user that is calling `/sync`.
            sync_config: The sync configuration of the user that is calling `/sync`.
            batch: The timeline batch for the room that will be sent to the user.
            end_token: Token of the end of the current batch. Normally this will be
                the same as the global "now_token", but if the user has left the room,

@@ -1338,10 +1347,11 @@ class SyncHandler:
                events in the timeline.
            timeline_state: The contribution to the room state from state events in
                `batch`. Only contains the last event for any given state key.
            joined: whether the user is currently joined to the room

        Returns:
            A map from (type, state_key) to event_id, for each event that we believe
            should be included in the `state` part of the sync response.
            should be included in the `state` or `state_after` part of the sync response.
        """
        if members_to_fetch is not None:
            # Lazy-loading of membership events is enabled.

@@ -1359,7 +1369,7 @@ class SyncHandler:
            # is no guarantee that our membership will be in the auth events of
            # timeline events when the room is partial stated.
            state_filter = StateFilter.from_lazy_load_member_list(
                members_to_fetch.union((syncing_user.to_string(),))
                members_to_fetch.union((sync_config.user.to_string(),))
            )

            # We are happy to use partial state to compute the `/sync` response.

@@ -1373,6 +1383,61 @@ class SyncHandler:
        await_full_state = True
        lazy_load_members = False

        # Check if we are wanting to return the state at the start or end of the
        # timeline. If at the end we can just use the current state.
        if sync_config.use_state_after:
            # If we're getting the state at the end of the timeline, we can just
            # use the current state of the room (and roll back any changes
            # between when we fetched the current state and `end_token`).
            #
            # For rooms we're not joined to, there might be a very large number
            # of deltas between `end_token` and "now", and so instead we fetch
            # the state at the end of the timeline.
            if joined:
                state_ids = await self._state_storage_controller.get_current_state_ids(
                    room_id,
                    state_filter=state_filter,
                    await_full_state=await_full_state,
                )

                # Now roll back the state by looking at the state deltas between
                # end_token and now.
                deltas = await self.store.get_current_state_deltas_for_room(
                    room_id,
                    from_token=end_token.room_key,
                    to_token=self.store.get_room_max_token(),
                )
                if deltas:
                    mutable_state_ids = dict(state_ids)

                    # We iterate over the deltas backwards so that if there are
                    # multiple changes of the same type/state_key we'll
                    # correctly pick the earliest delta.
                    for delta in reversed(deltas):
                        if delta.prev_event_id:
                            mutable_state_ids[(delta.event_type, delta.state_key)] = (
                                delta.prev_event_id
                            )
                        elif (delta.event_type, delta.state_key) in mutable_state_ids:
                            mutable_state_ids.pop((delta.event_type, delta.state_key))

                    state_ids = mutable_state_ids

                return state_ids

            else:
                # Just use state groups to get the state at the end of the
                # timeline, i.e. the state at the leave/etc event.
                state_at_timeline_end = (
                    await self._state_storage_controller.get_state_ids_at(
                        room_id,
                        stream_position=end_token,
                        state_filter=state_filter,
                        await_full_state=await_full_state,
                    )
                )
                return state_at_timeline_end

        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
            room_id,
            stream_position=end_token,

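Illustrative sketch only, not part of this changeset: the roll-back step above, in isolation. It assumes each delta exposes event_type, state_key and prev_event_id, exactly as used in the loop above.

from typing import Dict, Iterable, Tuple

def rollback_state(
    current_state_ids: Dict[Tuple[str, str], str],
    deltas: Iterable,  # each delta has .event_type, .state_key, .prev_event_id
) -> Dict[Tuple[str, str], str]:
    """Undo a sequence of state deltas (ordered oldest-to-newest) so that the
    returned map reflects the state before any of the deltas were applied."""
    state_ids = dict(current_state_ids)
    # Walk backwards so that the oldest delta for a given (type, state_key)
    # wins, i.e. we end up with the value from before the first change.
    for delta in reversed(list(deltas)):
        key = (delta.event_type, delta.state_key)
        if delta.prev_event_id:
            state_ids[key] = delta.prev_event_id
        else:
            # The entry did not exist before this delta, so drop it.
            state_ids.pop(key, None)
    return state_ids
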
@@ -1405,6 +1470,7 @@ class SyncHandler:
    async def _compute_state_delta_for_incremental_sync(
        self,
        room_id: str,
        sync_config: SyncConfig,
        batch: TimelineBatch,
        since_token: StreamToken,
        end_token: StreamToken,

@@ -1419,8 +1485,12 @@ class SyncHandler:
        (`compute_state_delta`) is responsible for keeping track of which membership
        events we have already sent to the client, and hence ripping them out.

        Note that whether this returns the state at the start or the end of the
        batch depends on `sync_config.use_state_after` (c.f. MSC4222).

        Args:
            room_id: The room we are calculating for.
            sync_config
            batch: The timeline batch for the room that will be sent to the user.
            since_token: Token of the end of the previous batch.
            end_token: Token of the end of the current batch. Normally this will be

@@ -1433,7 +1503,7 @@ class SyncHandler:

        Returns:
            A map from (type, state_key) to event_id, for each event that we believe
            should be included in the `state` part of the sync response.
            should be included in the `state` or `state_after` part of the sync response.
        """
        if members_to_fetch is not None:
            # Lazy-loading is enabled. Only return the state that is needed.

@@ -1445,6 +1515,51 @@ class SyncHandler:
        await_full_state = True
        lazy_load_members = False

        # Check if we are wanting to return the state at the start or end of the
        # timeline. If at the end we can just use the current state delta stream.
        if sync_config.use_state_after:
            delta_state_ids: MutableStateMap[str] = {}

            if members_to_fetch:
                # We're lazy-loading, so the client might need some more member
                # events to understand the events in this timeline. So we always
                # fish out all the member events corresponding to the timeline
                # here. The caller will then dedupe any redundant ones.
                member_ids = await self._state_storage_controller.get_current_state_ids(
                    room_id=room_id,
                    state_filter=StateFilter.from_types(
                        (EventTypes.Member, member) for member in members_to_fetch
                    ),
                    await_full_state=await_full_state,
                )
                delta_state_ids.update(member_ids)

            # We don't do LL filtering for incremental syncs - see
            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
            # N.B. this slows down incr syncs as we are now processing way more
            # state in the server than if we were LLing.
            #
            # i.e. we return all state deltas, including membership changes that
            # we'd normally exclude due to LL.
            deltas = await self.store.get_current_state_deltas_for_room(
                room_id=room_id,
                from_token=since_token.room_key,
                to_token=end_token.room_key,
            )
            for delta in deltas:
                if delta.event_id is None:
                    # There was a state reset and this state entry is no longer
                    # present, but we have no way of informing the client about
                    # this, so we just skip it for now.
                    continue

                # Note that deltas are in stream ordering, so if there are
                # multiple deltas for a given type/state_key we'll always pick
                # the latest one.
                delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id

            return delta_state_ids

        # For a non-gappy sync if the events in the timeline are simply a linear
        # chain (i.e. no merging/branching of the graph), then we know the state
        # delta between the end of the previous sync and start of the new one is

@@ -2867,6 +2982,7 @@ class SyncHandler:
                    since_token,
                    room_builder.end_token,
                    full_state=full_state,
                    joined=room_builder.rtype == "joined",
                )
            else:
                # An out of band room won't have any state changes.

@@ -36,7 +36,6 @@ from typing import (
)

import attr
import multipart
import treq
from canonicaljson import encode_canonical_json
from netaddr import AddrFormatError, IPAddress, IPSet

@@ -93,6 +92,20 @@ from synapse.util.async_helpers import timeout_deferred
if TYPE_CHECKING:
    from synapse.server import HomeServer

# Support both import names for the `python-multipart` (PyPI) library,
# which renamed its package name from `multipart` to `python_multipart`
# in 0.0.13 (though supports the old import name for compatibility).
# Note that the `multipart` package name conflicts with `multipart` (PyPI)
# so we should prefer importing from `python_multipart` when possible.
try:
    from python_multipart import MultipartParser

    if TYPE_CHECKING:
        from python_multipart import multipart
except ImportError:
    from multipart import MultipartParser  # type: ignore[no-redef]


logger = logging.getLogger(__name__)

outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])

@@ -1039,7 +1052,7 @@ class _MultipartParserProtocol(protocol.Protocol):
        self.deferred = deferred
        self.boundary = boundary
        self.max_length = max_length
        self.parser: Optional[multipart.MultipartParser] = None
        self.parser: Optional[MultipartParser] = None
        self.multipart_response = MultipartResponse()
        self.has_redirect = False
        self.in_json = False

@@ -1097,12 +1110,12 @@ class _MultipartParserProtocol(protocol.Protocol):
                    self.deferred.errback()
                self.file_length += end - start

            callbacks: "multipart.multipart.MultipartCallbacks" = {
            callbacks: "multipart.MultipartCallbacks" = {
                "on_header_field": on_header_field,
                "on_header_value": on_header_value,
                "on_part_data": on_part_data,
            }
            self.parser = multipart.MultipartParser(self.boundary, callbacks)
            self.parser = MultipartParser(self.boundary, callbacks)

        self.total_length += len(incoming_data)
        if self.max_length is not None and self.total_length >= self.max_length:

@@ -921,15 +921,6 @@ def set_cors_headers(request: "SynapseRequest") -> None:
            b"Access-Control-Expose-Headers",
            b"Synapse-Trace-Id, Server, ETag",
        )
    elif request.experimental_cors_msc3886:
        request.setHeader(
            b"Access-Control-Allow-Headers",
            b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
        )
        request.setHeader(
            b"Access-Control-Expose-Headers",
            b"ETag, Location, X-Max-Bytes",
        )
    else:
        request.setHeader(
            b"Access-Control-Allow-Headers",

@@ -94,7 +94,6 @@ class SynapseRequest(Request):
        self.reactor = site.reactor
        self._channel = channel  # this is used by the tests
        self.start_time = 0.0
        self.experimental_cors_msc3886 = site.experimental_cors_msc3886

        # The requester, if authenticated. For federation requests this is the
        # server name, for client requests this is the Requester object.

@@ -666,10 +665,6 @@ class SynapseSite(ProxySite):

        request_id_header = config.http_options.request_id_header

        self.experimental_cors_msc3886: bool = (
            config.http_options.experimental_cors_msc3886
        )

        def request_factory(channel: HTTPChannel, queued: bool) -> Request:
            return request_class(
                channel,

@@ -39,7 +39,7 @@ from twisted.internet.endpoints import (
)
from twisted.internet.interfaces import (
    IPushProducer,
    IReactorTCP,
    IReactorTime,
    IStreamClientEndpoint,
)
from twisted.internet.protocol import Factory, Protocol

@@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler):
        port: int,
        maximum_buffer: int = 1000,
        level: int = logging.NOTSET,
        _reactor: Optional[IReactorTCP] = None,
        _reactor: Optional[IReactorTime] = None,
    ):
        super().__init__(level=level)
        self.host = host

@@ -259,7 +259,7 @@ class MediaRepository:
        """
        media = await self.store.get_local_media(media_id)
        if media is None:
            raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND)
            raise NotFoundError("Unknown media ID")

        if media.user_id != auth_user.to_string():
            raise SynapseError(

@@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum):

    MSC3881 = "msc3881"
    MSC3575 = "msc3575"
    MSC4222 = "msc4222"

    def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
        if self is ExperimentalFeature.MSC3881:
            return config.experimental.msc3881_enabled
        if self is ExperimentalFeature.MSC3575:
            return config.experimental.msc3575_enabled
        if self is ExperimentalFeature.MSC4222:
            return config.experimental.msc4222_enabled

        assert_never(self)

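Illustrative sketch only, not part of this changeset: a hedged view of combining the global flag above with the per-user check that the sync servlet performs later in this change. It assumes the store-level is_feature_enabled does not itself consult the global config.

async def msc4222_enabled_for(store, config, user_id: str) -> bool:
    # Enabled either server-wide via the config, or per user via the
    # experimental-features store (the check used by the sync servlet below).
    feature = ExperimentalFeature.MSC4222
    if feature.is_globally_enabled(config):
        return True
    return await store.is_feature_enabled(user_id, feature)
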
@ -34,51 +34,6 @@ if TYPE_CHECKING:
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# n.b [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) has now been closed.
|
||||
# However, we want to keep this implementation around for some time.
|
||||
# TODO: define an end-of-life date for this implementation.
|
||||
class MSC3886RendezvousServlet(RestServlet):
|
||||
"""
|
||||
This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
|
||||
simple client rendezvous capability that is used by the "Sign in with QR" functionality.
|
||||
|
||||
This implementation only serves as a 307 redirect to a configured server rather than being a full implementation.
|
||||
|
||||
A module that implements the full functionality is available at: https://pypi.org/project/matrix-http-rendezvous-synapse/.
|
||||
|
||||
Request:
|
||||
|
||||
POST /rendezvous HTTP/1.1
|
||||
Content-Type: ...
|
||||
|
||||
...
|
||||
|
||||
Response:
|
||||
|
||||
HTTP/1.1 307
|
||||
Location: <configured endpoint>
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns(
|
||||
"/org.matrix.msc3886/rendezvous$", releases=[], v1=False, unstable=True
|
||||
)
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__()
|
||||
redirection_target: Optional[str] = hs.config.experimental.msc3886_endpoint
|
||||
assert (
|
||||
redirection_target is not None
|
||||
), "Servlet is only registered if there is a redirection target"
|
||||
self.endpoint = redirection_target.encode("utf-8")
|
||||
|
||||
async def on_POST(self, request: SynapseRequest) -> None:
|
||||
respond_with_redirect(
|
||||
request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True
|
||||
)
|
||||
|
||||
# PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target.
|
||||
|
||||
|
||||
class MSC4108DelegationRendezvousServlet(RestServlet):
|
||||
PATTERNS = client_patterns(
|
||||
"/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True
|
||||
|
@ -114,9 +69,6 @@ class MSC4108RendezvousServlet(RestServlet):
|
|||
|
||||
|
||||
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
if hs.config.experimental.msc3886_endpoint is not None:
|
||||
MSC3886RendezvousServlet(hs).register(http_server)
|
||||
|
||||
if hs.config.experimental.msc4108_enabled:
|
||||
MSC4108RendezvousServlet(hs).register(http_server)
|
||||
|
||||
|
|
|
@@ -152,6 +152,14 @@ class SyncRestServlet(RestServlet):
        filter_id = parse_string(request, "filter")
        full_state = parse_boolean(request, "full_state", default=False)

        use_state_after = False
        if await self.store.is_feature_enabled(
            user.to_string(), ExperimentalFeature.MSC4222
        ):
            use_state_after = parse_boolean(
                request, "org.matrix.msc4222.use_state_after", default=False
            )

        logger.debug(
            "/sync: user=%r, timeout=%r, since=%r, "
            "set_presence=%r, filter_id=%r, device_id=%r",

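Illustrative only, not part of this changeset: a client opts in to the new behaviour by adding the query parameter parsed above to its /sync request. The homeserver URL and access token below are placeholders, and the requests library is just one possible HTTP client.

import requests

HOMESERVER = "https://matrix.example.org"  # placeholder
ACCESS_TOKEN = "syt_placeholder_token"     # placeholder

resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v3/sync",
    params={
        "timeout": "30000",
        # Ask for state *after* the timeline (MSC4222); room entries then carry
        # an "org.matrix.msc4222.state_after" section instead of "state".
        "org.matrix.msc4222.use_state_after": "true",
    },
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
resp.raise_for_status()
sync_body = resp.json()
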
@@ -184,6 +192,7 @@ class SyncRestServlet(RestServlet):
            full_state,
            device_id,
            last_ignore_accdata_streampos,
            use_state_after,
        )

        if filter_id is None:

@@ -220,6 +229,7 @@ class SyncRestServlet(RestServlet):
            filter_collection=filter_collection,
            is_guest=requester.is_guest,
            device_id=device_id,
            use_state_after=use_state_after,
        )

        since_token = None

@@ -258,7 +268,7 @@ class SyncRestServlet(RestServlet):
        # We know that the requester has an access token since appservices
        # cannot use sync.
        response_content = await self.encode_response(
            time_now, sync_result, requester, filter_collection
            time_now, sync_config, sync_result, requester, filter_collection
        )

        logger.debug("Event formatting complete")

@ -268,6 +278,7 @@ class SyncRestServlet(RestServlet):
|
|||
async def encode_response(
|
||||
self,
|
||||
time_now: int,
|
||||
sync_config: SyncConfig,
|
||||
sync_result: SyncResult,
|
||||
requester: Requester,
|
||||
filter: FilterCollection,
|
||||
|
@ -292,7 +303,7 @@ class SyncRestServlet(RestServlet):
|
|||
)
|
||||
|
||||
joined = await self.encode_joined(
|
||||
sync_result.joined, time_now, serialize_options
|
||||
sync_config, sync_result.joined, time_now, serialize_options
|
||||
)
|
||||
|
||||
invited = await self.encode_invited(
|
||||
|
@ -304,7 +315,7 @@ class SyncRestServlet(RestServlet):
|
|||
)
|
||||
|
||||
archived = await self.encode_archived(
|
||||
sync_result.archived, time_now, serialize_options
|
||||
sync_config, sync_result.archived, time_now, serialize_options
|
||||
)
|
||||
|
||||
logger.debug("building sync response dict")
|
||||
|
@ -372,6 +383,7 @@ class SyncRestServlet(RestServlet):
|
|||
@trace_with_opname("sync.encode_joined")
|
||||
async def encode_joined(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
rooms: List[JoinedSyncResult],
|
||||
time_now: int,
|
||||
serialize_options: SerializeEventConfig,
|
||||
|
@ -380,6 +392,7 @@ class SyncRestServlet(RestServlet):
|
|||
Encode the joined rooms in a sync result
|
||||
|
||||
Args:
|
||||
sync_config
|
||||
rooms: list of sync results for rooms this user is joined to
|
||||
time_now: current time - used as a baseline for age calculations
|
||||
serialize_options: Event serializer options
|
||||
|
@ -389,7 +402,11 @@ class SyncRestServlet(RestServlet):
|
|||
joined = {}
|
||||
for room in rooms:
|
||||
joined[room.room_id] = await self.encode_room(
|
||||
room, time_now, joined=True, serialize_options=serialize_options
|
||||
sync_config,
|
||||
room,
|
||||
time_now,
|
||||
joined=True,
|
||||
serialize_options=serialize_options,
|
||||
)
|
||||
|
||||
return joined
|
||||
|
@ -477,6 +494,7 @@ class SyncRestServlet(RestServlet):
|
|||
@trace_with_opname("sync.encode_archived")
|
||||
async def encode_archived(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
rooms: List[ArchivedSyncResult],
|
||||
time_now: int,
|
||||
serialize_options: SerializeEventConfig,
|
||||
|
@ -485,6 +503,7 @@ class SyncRestServlet(RestServlet):
|
|||
Encode the archived rooms in a sync result
|
||||
|
||||
Args:
|
||||
sync_config
|
||||
rooms: list of sync results for rooms this user is joined to
|
||||
time_now: current time - used as a baseline for age calculations
|
||||
serialize_options: Event serializer options
|
||||
|
@ -494,13 +513,18 @@ class SyncRestServlet(RestServlet):
|
|||
joined = {}
|
||||
for room in rooms:
|
||||
joined[room.room_id] = await self.encode_room(
|
||||
room, time_now, joined=False, serialize_options=serialize_options
|
||||
sync_config,
|
||||
room,
|
||||
time_now,
|
||||
joined=False,
|
||||
serialize_options=serialize_options,
|
||||
)
|
||||
|
||||
return joined
|
||||
|
||||
async def encode_room(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
room: Union[JoinedSyncResult, ArchivedSyncResult],
|
||||
time_now: int,
|
||||
joined: bool,
|
||||
|
@ -508,6 +532,7 @@ class SyncRestServlet(RestServlet):
|
|||
) -> JsonDict:
|
||||
"""
|
||||
Args:
|
||||
sync_config
|
||||
room: sync result for a single room
|
||||
time_now: current time - used as a baseline for age calculations
|
||||
token_id: ID of the user's auth token - used for namespacing
|
||||
|
@@ -548,13 +573,20 @@ class SyncRestServlet(RestServlet):

        account_data = room.account_data

        # We either include a `state` or `state_after` field depending on
        # whether the client has opted in to the newer `state_after` behavior.
        if sync_config.use_state_after:
            state_key_name = "org.matrix.msc4222.state_after"
        else:
            state_key_name = "state"

        result: JsonDict = {
            "timeline": {
                "events": serialized_timeline,
                "prev_batch": await room.timeline.prev_batch.to_string(self.store),
                "limited": room.timeline.limited,
            },
            "state": {"events": serialized_state},
            state_key_name: {"events": serialized_state},
            "account_data": {"events": account_data},
        }

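Illustrative only, not part of this changeset: with the opt-in above, a room entry in the sync response carries its state under the unstable key instead of "state". All values below are placeholders.

room_payload_with_state_after = {
    "timeline": {
        "events": ["...serialized timeline events..."],  # placeholder
        "prev_batch": "s72594_4483_1934",                # placeholder token
        "limited": False,
    },
    # Present instead of "state" when org.matrix.msc4222.use_state_after=true:
    "org.matrix.msc4222.state_after": {
        "events": ["...serialized state events..."],     # placeholder
    },
    "account_data": {"events": []},
}
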
@ -688,6 +720,7 @@ class SlidingSyncE2eeRestServlet(RestServlet):
|
|||
filter_collection=self.only_member_events_filter_collection,
|
||||
is_guest=requester.is_guest,
|
||||
device_id=device_id,
|
||||
use_state_after=False, # We don't return any rooms so this flag is a no-op
|
||||
)
|
||||
|
||||
since_token = None
|
||||
|
|
|
@ -149,9 +149,6 @@ class VersionsRestServlet(RestServlet):
|
|||
"org.matrix.msc3881": msc3881_enabled,
|
||||
# Adds support for filtering /messages by event relation.
|
||||
"org.matrix.msc3874": self.config.experimental.msc3874_enabled,
|
||||
# Adds support for simple HTTP rendezvous as per MSC3886
|
||||
"org.matrix.msc3886": self.config.experimental.msc3886_endpoint
|
||||
is not None,
|
||||
# Adds support for relation-based redactions as per MSC3912.
|
||||
"org.matrix.msc3912": self.config.experimental.msc3912_enabled,
|
||||
# Whether recursively provide relations is supported.
|
||||
|
|
|
@ -94,7 +94,7 @@ class BaseUploadServlet(RestServlet):
|
|||
|
||||
# if headers.hasHeader(b"Content-Disposition"):
|
||||
# disposition = headers.getRawHeaders(b"Content-Disposition")[0]
|
||||
# TODO(markjh): parse content-dispostion
|
||||
# TODO(markjh): parse content-disposition
|
||||
|
||||
return content_length, upload_name, media_type
|
||||
|
||||
|
|
|
@ -234,8 +234,11 @@ class StateStorageController:
|
|||
RuntimeError if we don't have a state group for one or more of the events
|
||||
(ie they are outliers or unknown)
|
||||
"""
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
await_full_state = True
|
||||
if state_filter and not state_filter.must_await_full_state(self._is_mine_id):
|
||||
if not state_filter.must_await_full_state(self._is_mine_id):
|
||||
await_full_state = False
|
||||
|
||||
event_to_groups = await self.get_state_group_for_events(
|
||||
|
@ -244,7 +247,7 @@ class StateStorageController:
|
|||
|
||||
groups = set(event_to_groups.values())
|
||||
group_to_state = await self.stores.state._get_state_for_groups(
|
||||
groups, state_filter or StateFilter.all()
|
||||
groups, state_filter
|
||||
)
|
||||
|
||||
state_event_map = await self.stores.main.get_events(
|
||||
|
@ -292,10 +295,11 @@ class StateStorageController:
|
|||
RuntimeError if we don't have a state group for one or more of the events
|
||||
(ie they are outliers or unknown)
|
||||
"""
|
||||
if (
|
||||
await_full_state
|
||||
and state_filter
|
||||
and not state_filter.must_await_full_state(self._is_mine_id)
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
if await_full_state and not state_filter.must_await_full_state(
|
||||
self._is_mine_id
|
||||
):
|
||||
# Full state is not required if the state filter is restrictive enough.
|
||||
await_full_state = False
|
||||
|
@ -306,7 +310,7 @@ class StateStorageController:
|
|||
|
||||
groups = set(event_to_groups.values())
|
||||
group_to_state = await self.stores.state._get_state_for_groups(
|
||||
groups, state_filter or StateFilter.all()
|
||||
groups, state_filter
|
||||
)
|
||||
|
||||
event_to_state = {
|
||||
|
@ -335,9 +339,10 @@ class StateStorageController:
|
|||
RuntimeError if we don't have a state group for the event (ie it is an
|
||||
outlier or is unknown)
|
||||
"""
|
||||
state_map = await self.get_state_for_events(
|
||||
[event_id], state_filter or StateFilter.all()
|
||||
)
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
state_map = await self.get_state_for_events([event_id], state_filter)
|
||||
return state_map[event_id]
|
||||
|
||||
@trace
|
||||
|
@ -365,9 +370,12 @@ class StateStorageController:
|
|||
RuntimeError if we don't have a state group for the event (ie it is an
|
||||
outlier or is unknown)
|
||||
"""
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
state_map = await self.get_state_ids_for_events(
|
||||
[event_id],
|
||||
state_filter or StateFilter.all(),
|
||||
state_filter,
|
||||
await_full_state=await_full_state,
|
||||
)
|
||||
return state_map[event_id]
|
||||
|
@ -388,9 +396,12 @@ class StateStorageController:
|
|||
at the event and `state_filter` is not satisfied by partial state.
|
||||
Defaults to `True`.
|
||||
"""
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
state_ids = await self.get_state_ids_for_event(
|
||||
event_id,
|
||||
state_filter=state_filter or StateFilter.all(),
|
||||
state_filter=state_filter,
|
||||
await_full_state=await_full_state,
|
||||
)
|
||||
|
||||
|
@ -426,6 +437,9 @@ class StateStorageController:
|
|||
at the last event in the room before `stream_position` and
|
||||
`state_filter` is not satisfied by partial state. Defaults to `True`.
|
||||
"""
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
# FIXME: This gets the state at the latest event before the stream ordering,
|
||||
# which might not be the same as the "current state" of the room at the time
|
||||
# of the stream token if there were multiple forward extremities at the time.
|
||||
|
@ -442,7 +456,7 @@ class StateStorageController:
|
|||
if last_event_id:
|
||||
state = await self.get_state_after_event(
|
||||
last_event_id,
|
||||
state_filter=state_filter or StateFilter.all(),
|
||||
state_filter=state_filter,
|
||||
await_full_state=await_full_state,
|
||||
)
|
||||
|
||||
|
@ -500,9 +514,10 @@ class StateStorageController:
|
|||
Returns:
|
||||
Dict of state group to state map.
|
||||
"""
|
||||
return await self.stores.state._get_state_for_groups(
|
||||
groups, state_filter or StateFilter.all()
|
||||
)
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
return await self.stores.state._get_state_for_groups(groups, state_filter)
|
||||
|
||||
@trace
|
||||
@tag_args
|
||||
|
@ -583,12 +598,13 @@ class StateStorageController:
|
|||
Returns:
|
||||
The current state of the room.
|
||||
"""
|
||||
if await_full_state and (
|
||||
not state_filter or state_filter.must_await_full_state(self._is_mine_id)
|
||||
):
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
if await_full_state and state_filter.must_await_full_state(self._is_mine_id):
|
||||
await self._partial_state_room_tracker.await_full_state(room_id)
|
||||
|
||||
if state_filter and not state_filter.is_full():
|
||||
if state_filter is not None and not state_filter.is_full():
|
||||
return await self.stores.main.get_partial_filtered_current_state_ids(
|
||||
room_id, state_filter
|
||||
)
|
||||
|
|
|
@ -322,6 +322,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
|
|||
self._attempt_to_invalidate_cache(
|
||||
"get_unread_event_push_actions_by_room_for_user", (room_id,)
|
||||
)
|
||||
self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id, event_id))
|
||||
|
||||
self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))
|
||||
|
||||
|
@ -446,6 +447,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
|
|||
self._attempt_to_invalidate_cache("_get_state_group_for_event", None)
|
||||
|
||||
self._attempt_to_invalidate_cache("get_event_ordering", None)
|
||||
self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id,))
|
||||
self._attempt_to_invalidate_cache("is_partial_state_event", None)
|
||||
self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None)
|
||||
|
||||
|
|
|
@ -99,6 +99,13 @@ class EndToEndKeyBackgroundStore(SQLBaseStore):
|
|||
unique=True,
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="add_otk_ts_added_index",
|
||||
index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx",
|
||||
table="e2e_one_time_keys_json",
|
||||
columns=("user_id", "device_id", "algorithm", "ts_added_ms"),
|
||||
)
|
||||
|
||||
|
||||
class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore):
|
||||
def __init__(
|
||||
|
@ -1122,7 +1129,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
|||
"""Take a list of one time keys out of the database.
|
||||
|
||||
Args:
|
||||
query_list: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
|
||||
|
||||
Returns:
|
||||
A tuple (results, missing) of:
|
||||
|
@@ -1310,9 +1317,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
            OTK was found.
        """

        # Return the oldest keys from this device (based on `ts_added_ms`).
        # Doing so means that keys are issued in the same order they were uploaded,
        # which reduces the chances of a client expiring its copy of a (private)
        # key while the public key is still on the server, waiting to be issued.
        sql = """
            SELECT key_id, key_json FROM e2e_one_time_keys_json
            WHERE user_id = ? AND device_id = ? AND algorithm = ?
            ORDER BY ts_added_ms
            LIMIT ?
        """

@@ -1354,13 +1366,22 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
            A list of tuples (user_id, device_id, algorithm, key_id, key_json)
            for each OTK claimed.
        """
        # Find, delete, and return the oldest keys from each device (based on
        # `ts_added_ms`).
        #
        # Doing so means that keys are issued in the same order they were uploaded,
        # which reduces the chances of a client expiring its copy of a (private)
        # key while the public key is still on the server, waiting to be issued.
        sql = """
            WITH claims(user_id, device_id, algorithm, claim_count) AS (
                VALUES ?
            ), ranked_keys AS (
                SELECT
                    user_id, device_id, algorithm, key_id, claim_count,
                    ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r
                    ROW_NUMBER() OVER (
                        PARTITION BY (user_id, device_id, algorithm)
                        ORDER BY ts_added_ms
                    ) AS r
                FROM e2e_one_time_keys_json
                JOIN claims USING (user_id, device_id, algorithm)
            )

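Illustrative only, not part of this changeset: a self-contained demonstration of the windowing trick above, using the standard-library sqlite3 module (window functions need SQLite 3.25 or newer) and made-up key data.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE e2e_one_time_keys_json "
    "(user_id TEXT, device_id TEXT, algorithm TEXT, key_id TEXT, ts_added_ms INTEGER)"
)
# Three keys for the same device, uploaded at different times (made-up data).
conn.executemany(
    "INSERT INTO e2e_one_time_keys_json VALUES (?, ?, ?, ?, ?)",
    [
        ("@alice:example.org", "DEV1", "signed_curve25519", "AAAAAB", 2000),
        ("@alice:example.org", "DEV1", "signed_curve25519", "AAAAAA", 1000),
        ("@alice:example.org", "DEV1", "signed_curve25519", "AAAAAC", 3000),
    ],
)
# Rank keys oldest-first per (user, device, algorithm) and claim the oldest one.
rows = conn.execute(
    """
    SELECT key_id FROM (
        SELECT key_id,
               ROW_NUMBER() OVER (
                   PARTITION BY user_id, device_id, algorithm
                   ORDER BY ts_added_ms
               ) AS r
        FROM e2e_one_time_keys_json
    ) WHERE r <= 1
    """
).fetchall()
print(rows)  # [('AAAAAA',)] -- the oldest upload is issued first
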
@ -1432,6 +1453,54 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
|||
impl,
|
||||
)
|
||||
|
||||
async def delete_old_otks_for_next_user_batch(
|
||||
self, after_user_id: str, number_of_users: int
|
||||
) -> Tuple[List[str], int]:
|
||||
"""Deletes old OTKs belonging to the next batch of users
|
||||
|
||||
Returns:
|
||||
`(users, rows)`, where:
|
||||
* `users` is the user IDs of the updated users. An empty list if we are done.
|
||||
* `rows` is the number of deleted rows
|
||||
"""
|
||||
|
||||
def impl(txn: LoggingTransaction) -> Tuple[List[str], int]:
|
||||
# Find a batch of users
|
||||
txn.execute(
|
||||
"""
|
||||
SELECT DISTINCT(user_id) FROM e2e_one_time_keys_json
|
||||
WHERE user_id > ?
|
||||
ORDER BY user_id
|
||||
LIMIT ?
|
||||
""",
|
||||
(after_user_id, number_of_users),
|
||||
)
|
||||
users = [row[0] for row in txn.fetchall()]
|
||||
if len(users) == 0:
|
||||
return users, 0
|
||||
|
||||
# Delete any old OTKs belonging to those users.
|
||||
#
|
||||
# We only actually consider OTKs whose key ID is 6 characters long. These
|
||||
# keys were likely made by libolm rather than Vodozemac; libolm only kept
|
||||
# 100 private OTKs, so was far more vulnerable than Vodozemac to throwing
|
||||
# away keys prematurely.
|
||||
clause, args = make_in_list_sql_clause(
|
||||
txn.database_engine, "user_id", users
|
||||
)
|
||||
sql = f"""
|
||||
DELETE FROM e2e_one_time_keys_json
|
||||
WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6
|
||||
"""
|
||||
args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000))
|
||||
txn.execute(sql, args)
|
||||
|
||||
return users, txn.rowcount
|
||||
|
||||
return await self.db_pool.runInteraction(
|
||||
"delete_old_otks_for_next_user_batch", impl
|
||||
)
|
||||
|
||||
|
||||
class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
|
||||
def __init__(
|
||||
|
|
|
@ -193,6 +193,14 @@ class _EventRow:
|
|||
outlier: bool
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class EventMetadata:
|
||||
"""Event metadata returned by `get_metadata_for_event(..)`"""
|
||||
|
||||
sender: str
|
||||
received_ts: int
|
||||
|
||||
|
||||
class EventRedactBehaviour(Enum):
|
||||
"""
|
||||
What to do when retrieving a redacted event from the database.
|
||||
|
@ -2580,3 +2588,22 @@ class EventsWorkerStore(SQLBaseStore):
|
|||
_BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
|
||||
)
|
||||
)
|
||||
|
||||
@cached(tree=True)
|
||||
async def get_metadata_for_event(
|
||||
self, room_id: str, event_id: str
|
||||
) -> Optional[EventMetadata]:
|
||||
row = await self.db_pool.simple_select_one(
|
||||
table="events",
|
||||
keyvalues={"room_id": room_id, "event_id": event_id},
|
||||
retcols=("sender", "received_ts"),
|
||||
allow_none=True,
|
||||
desc="get_metadata_for_event",
|
||||
)
|
||||
if row is None:
|
||||
return None
|
||||
|
||||
return EventMetadata(
|
||||
sender=row[0],
|
||||
received_ts=row[1],
|
||||
)
|
||||
|
|
|
@ -2550,7 +2550,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
|
|||
still contains events with partial state.
|
||||
"""
|
||||
try:
|
||||
async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
|
||||
async with (
|
||||
self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id
|
||||
):
|
||||
await self.db_pool.runInteraction(
|
||||
"clear_partial_state_room",
|
||||
self._clear_partial_state_room_txn,
|
||||
|
|
|
@ -21,7 +21,11 @@ import attr
|
|||
from synapse.api.errors import SlidingSyncUnknownPosition
|
||||
from synapse.logging.opentracing import log_kv
|
||||
from synapse.storage._base import SQLBaseStore, db_to_json
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
LoggingTransaction,
|
||||
)
|
||||
from synapse.types import MultiWriterStreamToken, RoomStreamToken
|
||||
from synapse.types.handlers.sliding_sync import (
|
||||
HaveSentRoom,
|
||||
|
@ -35,12 +39,28 @@ from synapse.util import json_encoder
|
|||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.databases.main import DataStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SlidingSyncStore(SQLBaseStore):
|
||||
def __init__(
|
||||
self,
|
||||
database: DatabasePool,
|
||||
db_conn: LoggingDatabaseConnection,
|
||||
hs: "HomeServer",
|
||||
):
|
||||
super().__init__(database, db_conn, hs)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="sliding_sync_connection_room_configs_required_state_id_idx",
|
||||
index_name="sliding_sync_connection_room_configs_required_state_id_idx",
|
||||
table="sliding_sync_connection_room_configs",
|
||||
columns=("required_state_id",),
|
||||
)
|
||||
|
||||
async def get_latest_bump_stamp_for_room(
|
||||
self,
|
||||
room_id: str,
|
||||
|
|
|
@ -572,10 +572,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
|
|||
Returns:
|
||||
Map from type/state_key to event ID.
|
||||
"""
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
where_clause, where_args = (
|
||||
state_filter or StateFilter.all()
|
||||
).make_sql_filter_clause()
|
||||
where_clause, where_args = (state_filter).make_sql_filter_clause()
|
||||
|
||||
if not where_clause:
|
||||
# We delegate to the cached version
|
||||
|
@ -584,7 +584,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
|
|||
def _get_filtered_current_state_ids_txn(
|
||||
txn: LoggingTransaction,
|
||||
) -> StateMap[str]:
|
||||
results = StateMapWrapper(state_filter=state_filter or StateFilter.all())
|
||||
results = StateMapWrapper(state_filter=state_filter)
|
||||
|
||||
sql = """
|
||||
SELECT type, state_key, event_id FROM current_state_events
|
||||
|
@ -681,7 +681,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
|
|||
context: EventContext,
|
||||
) -> None:
|
||||
"""Update the state group for a partial state event"""
|
||||
async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id:
|
||||
async with (
|
||||
self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id
|
||||
):
|
||||
await self.db_pool.runInteraction(
|
||||
"update_state_for_partial_state_event",
|
||||
self._update_state_for_partial_state_event_txn,
|
||||
|
|
|
@ -20,18 +20,26 @@
|
|||
#
|
||||
|
||||
import logging
|
||||
from typing import List, Optional, Tuple
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple
|
||||
|
||||
import attr
|
||||
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
LoggingTransaction,
|
||||
make_in_list_sql_clause,
|
||||
)
|
||||
from synapse.storage.databases.main.stream import _filter_results_by_stream
|
||||
from synapse.types import RoomStreamToken, StrCollection
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
from synapse.util.iterutils import batch_iter
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
@ -54,6 +62,21 @@ class StateDeltasStore(SQLBaseStore):
|
|||
# attribute. TODO: can we get static analysis to enforce this?
|
||||
_curr_state_delta_stream_cache: StreamChangeCache
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
database: DatabasePool,
|
||||
db_conn: LoggingDatabaseConnection,
|
||||
hs: "HomeServer",
|
||||
):
|
||||
super().__init__(database, db_conn, hs)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="current_state_delta_stream_room_index",
|
||||
index_name="current_state_delta_stream_room_idx",
|
||||
table="current_state_delta_stream",
|
||||
columns=("room_id", "stream_id"),
|
||||
)
|
||||
|
||||
async def get_partial_current_state_deltas(
|
||||
self, prev_stream_id: int, max_stream_id: int
|
||||
) -> Tuple[int, List[StateDelta]]:
|
||||
|
|
|
@ -112,8 +112,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
|
|||
Returns:
|
||||
Map from state_group to a StateMap at that point.
|
||||
"""
|
||||
|
||||
state_filter = state_filter or StateFilter.all()
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}
|
||||
|
||||
|
|
|
@ -284,7 +284,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
|
|||
Returns:
|
||||
Dict of state group to state map.
|
||||
"""
|
||||
state_filter = state_filter or StateFilter.all()
|
||||
if state_filter is None:
|
||||
state_filter = StateFilter.all()
|
||||
|
||||
member_filter, non_member_filter = state_filter.get_member_split()
|
||||
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
--
|
||||
-- This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
--
|
||||
-- Copyright (C) 2024 New Vector, Ltd
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU Affero General Public License as
|
||||
-- published by the Free Software Foundation, either version 3 of the
|
||||
-- License, or (at your option) any later version.
|
||||
--
|
||||
-- See the GNU Affero General Public License for more details:
|
||||
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
|
||||
|
||||
-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can
|
||||
-- efficiently be issued in the same order they were uploaded.
|
||||
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
|
||||
(8803, 'add_otk_ts_added_index', '{}');
|
|
@ -0,0 +1,18 @@
|
|||
--
|
||||
-- This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
--
|
||||
-- Copyright (C) 2024 New Vector, Ltd
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU Affero General Public License as
|
||||
-- published by the Free Software Foundation, either version 3 of the
|
||||
-- License, or (at your option) any later version.
|
||||
--
|
||||
-- See the GNU Affero General Public License for more details:
|
||||
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
|
||||
|
||||
-- Add an index on `current_state_delta_stream(room_id, stream_id)` to allow
|
||||
-- efficient per-room lookups.
|
||||
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
|
||||
(8804, 'current_state_delta_stream_room_index', '{}');
|
|
@ -0,0 +1,19 @@
|
|||
--
|
||||
-- This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
--
|
||||
-- Copyright (C) 2024 New Vector, Ltd
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU Affero General Public License as
|
||||
-- published by the Free Software Foundation, either version 3 of the
|
||||
-- License, or (at your option) any later version.
|
||||
--
|
||||
-- See the GNU Affero General Public License for more details:
|
||||
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
|
||||
-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
|
||||
-- that it could still have old OTKs that the client has dropped.
|
||||
--
|
||||
-- We create a scheduled task which will drop old OTKs, to flush them out.
|
||||
INSERT INTO scheduled_tasks(id, action, status, timestamp)
|
||||
VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', extract(epoch from current_timestamp) * 1000);
|
|
@ -0,0 +1,19 @@
|
|||
--
|
||||
-- This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
--
|
||||
-- Copyright (C) 2024 New Vector, Ltd
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU Affero General Public License as
|
||||
-- published by the Free Software Foundation, either version 3 of the
|
||||
-- License, or (at your option) any later version.
|
||||
--
|
||||
-- See the GNU Affero General Public License for more details:
|
||||
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
|
||||
-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
|
||||
-- that it could still have old OTKs that the client has dropped.
|
||||
--
|
||||
-- We create a scheduled task which will drop old OTKs, to flush them out.
|
||||
INSERT INTO scheduled_tasks(id, action, status, timestamp)
|
||||
VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', strftime('%s', 'now') * 1000);
|
|
@ -0,0 +1,20 @@
|
|||
--
|
||||
-- This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
--
|
||||
-- Copyright (C) 2024 New Vector, Ltd
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU Affero General Public License as
|
||||
-- published by the Free Software Foundation, either version 3 of the
|
||||
-- License, or (at your option) any later version.
|
||||
--
|
||||
-- See the GNU Affero General Public License for more details:
|
||||
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
|
||||
|
||||
-- Add an index on sliding_sync_connection_room_configs(required_state_id), so
|
||||
-- that when we delete entries in `sliding_sync_connection_required_state` it's
|
||||
-- efficient for Postgres to check they've been deleted from
|
||||
-- `sliding_sync_connection_room_configs` too
|
||||
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
|
||||
(8805, 'sliding_sync_connection_room_configs_required_state_id_idx', '{}');
|
|
@@ -10,7 +10,7 @@
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.

from typing import Optional
from typing import List, Mapping, Optional, Tuple

from synapse.types import JsonDict

@@ -105,3 +105,29 @@ class EventInternalMetadata:

    def is_notifiable(self) -> bool:
        """Whether this event can trigger a push notification"""

def event_visible_to_server(
    sender: str,
    target_server_name: str,
    history_visibility: str,
    erased_senders: Mapping[str, bool],
    partial_state_invisible: bool,
    memberships: List[Tuple[str, str]],
) -> bool:
    """Determine whether the server is allowed to see the unredacted event.

    Args:
        sender: The sender of the event.
        target_server_name: The server we want to send the event to.
        history_visibility: The history_visibility value at the event.
        erased_senders: A mapping of users and whether they have requested erasure. If a
            user is not in the map, it is treated as though they haven't requested erasure.
        partial_state_invisible: Whether the event should be treated as invisible due to
            the partial state status of the room.
        memberships: A list of membership state information at the event for users
            matching the `target_server_name`. Each list item must contain a tuple of
            (state_key, membership).

    Returns:
        Whether the server is allowed to see the unredacted event.
    """

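Illustrative only, not part of this changeset: a call mirroring how the visibility-filtering code later in this change invokes the Rust helper; the user IDs, server names and visibility value are made up.

from synapse.synapse_rust.events import event_visible_to_server

visible = event_visible_to_server(
    sender="@alice:example.org",
    target_server_name="remote.example.com",
    history_visibility="joined",
    erased_senders={"@alice:example.org": False},
    partial_state_invisible=False,
    # One (state_key, membership) pair per member from the target server.
    memberships=[("@bob:remote.example.com", "join")],
)
# `visible` is True here: the target server has a joined user at the event,
# and the sender has not requested erasure.
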
@@ -68,15 +68,23 @@ class StateFilter:
    include_others: bool = False

    def __attrs_post_init__(self) -> None:
        # If `include_others` is set we canonicalise the filter by removing
        # wildcards from the types dictionary
        if self.include_others:
            # If `include_others` is set we canonicalise the filter by removing
            # wildcards from the types dictionary

            # this is needed to work around the fact that StateFilter is frozen
            object.__setattr__(
                self,
                "types",
                immutabledict({k: v for k, v in self.types.items() if v is not None}),
            )
        else:
            # Otherwise we remove entries where the value is the empty set.
            object.__setattr__(
                self,
                "types",
                immutabledict({k: v for k, v in self.types.items() if v is None or v}),
            )

    @staticmethod
    def all() -> "StateFilter":

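Illustrative only, not part of this changeset: the two canonicalisation branches above in action. It assumes StateFilter can be constructed directly from a types mapping, as the attrs fields suggest.

from immutabledict import immutabledict

# include_others=True: the wildcard entry ("m.room.member": None) is redundant,
# because other types are included anyway, so __attrs_post_init__ drops it.
f1 = StateFilter(
    types=immutabledict({"m.room.member": None, "m.room.name": frozenset({""})}),
    include_others=True,
)
assert "m.room.member" not in f1.types

# include_others=False: an entry with an empty set of state keys matches nothing,
# so it is dropped; wildcard (None) entries are kept.
f2 = StateFilter(
    types=immutabledict({"m.room.member": frozenset(), "m.room.topic": None}),
    include_others=False,
)
assert "m.room.member" not in f2.types and "m.room.topic" in f2.types
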
@ -47,7 +47,6 @@ class WheelTimer(Generic[T]):
|
|||
"""
|
||||
self.bucket_size: int = bucket_size
|
||||
self.entries: List[_Entry[T]] = []
|
||||
self.current_tick: int = 0
|
||||
|
||||
def insert(self, now: int, obj: T, then: int) -> None:
|
||||
"""Inserts object into timer.
|
||||
|
@ -78,11 +77,10 @@ class WheelTimer(Generic[T]):
|
|||
self.entries[max(min_key, then_key) - min_key].elements.add(obj)
|
||||
return
|
||||
|
||||
next_key = now_key + 1
|
||||
if self.entries:
|
||||
last_key = self.entries[-1].end_key
|
||||
last_key = self.entries[-1].end_key + 1
|
||||
else:
|
||||
last_key = next_key
|
||||
last_key = now_key + 1
|
||||
|
||||
# Handle the case when `then` is in the past and `entries` is empty.
|
||||
then_key = max(last_key, then_key)
|
||||
|
|
|
@ -27,7 +27,6 @@ from typing import (
|
|||
Final,
|
||||
FrozenSet,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Set,
|
||||
|
@ -48,6 +47,7 @@ from synapse.events.utils import clone_event, prune_event
|
|||
from synapse.logging.opentracing import trace
|
||||
from synapse.storage.controllers import StorageControllers
|
||||
from synapse.storage.databases.main import DataStore
|
||||
from synapse.synapse_rust.events import event_visible_to_server
|
||||
from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id
|
||||
from synapse.types.state import StateFilter
|
||||
from synapse.util import Clock
|
||||
|
@ -628,17 +628,6 @@ async def filter_events_for_server(
|
|||
"""Filter a list of events based on whether the target server is allowed to
|
||||
see them.
|
||||
|
||||
For a fully stated room, the target server is allowed to see an event E if:
|
||||
- the state at E has world readable or shared history vis, OR
|
||||
- the state at E says that the target server is in the room.
|
||||
|
||||
For a partially stated room, the target server is allowed to see E if:
|
||||
- E was created by this homeserver, AND:
|
||||
- the partial state at E has world readable or shared history vis, OR
|
||||
- the partial state at E says that the target server is in the room.
|
||||
|
||||
TODO: state before or state after?
|
||||
|
||||
Args:
|
||||
storage
|
||||
target_server_name
|
||||
|
@ -655,35 +644,6 @@ async def filter_events_for_server(
|
|||
The filtered events.
|
||||
"""
|
||||
|
||||
def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool:
|
||||
if erased_senders and erased_senders[event.sender]:
|
||||
logger.info("Sender of %s has been erased, redacting", event.event_id)
|
||||
return True
|
||||
return False
|
||||
|
||||
def check_event_is_visible(
|
||||
visibility: str, memberships: StateMap[EventBase]
|
||||
) -> bool:
|
||||
if visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED):
|
||||
return True
|
||||
|
||||
# We now loop through all membership events looking for
|
||||
# membership states for the requesting server to determine
|
||||
# if the server is either in the room or has been invited
|
||||
# into the room.
|
||||
for ev in memberships.values():
|
||||
assert get_domain_from_id(ev.state_key) == target_server_name
|
||||
|
||||
memtype = ev.membership
|
||||
if memtype == Membership.JOIN:
|
||||
return True
|
||||
elif memtype == Membership.INVITE:
|
||||
if visibility == HistoryVisibility.INVITED:
|
||||
return True
|
||||
|
||||
# server has no users in the room: redact
|
||||
return False
|
||||
|
||||
if filter_out_erased_senders:
|
||||
erased_senders = await storage.main.are_users_erased(e.sender for e in events)
|
||||
else:
|
||||
|
@ -726,20 +686,16 @@ async def filter_events_for_server(
|
|||
target_server_name,
|
||||
)
|
||||
|
||||
def include_event_in_output(e: EventBase) -> bool:
|
||||
erased = is_sender_erased(e, erased_senders)
|
||||
visible = check_event_is_visible(
|
||||
event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
|
||||
)
|
||||
|
||||
if e.event_id in partial_state_invisible_event_ids:
|
||||
visible = False
|
||||
|
||||
return visible and not erased
|
||||
|
||||
to_return = []
|
||||
for e in events:
|
||||
if include_event_in_output(e):
|
||||
if event_visible_to_server(
|
||||
sender=e.sender,
|
||||
target_server_name=target_server_name,
|
||||
history_visibility=event_to_history_vis[e.event_id],
|
||||
erased_senders=erased_senders,
|
||||
partial_state_invisible=e.event_id in partial_state_invisible_event_ids,
|
||||
memberships=list(event_to_memberships.get(e.event_id, {}).values()),
|
||||
):
|
||||
to_return.append(e)
|
||||
elif redact:
|
||||
to_return.append(prune_event(e))
|
||||
|
@ -796,7 +752,7 @@ async def _event_to_history_vis(
|
|||
|
||||
async def _event_to_memberships(
|
||||
storage: StorageControllers, events: Collection[EventBase], server_name: str
|
||||
) -> Dict[str, StateMap[EventBase]]:
|
||||
) -> Dict[str, StateMap[Tuple[str, str]]]:
|
||||
"""Get the remote membership list at each of the given events
|
||||
|
||||
Returns a map from event id to state map, which will contain only membership events
|
||||
|
@ -849,7 +805,7 @@ async def _event_to_memberships(
|
|||
|
||||
return {
|
||||
e_id: {
|
||||
key: event_map[inner_e_id]
|
||||
key: (event_map[inner_e_id].state_key, event_map[inner_e_id].membership)
|
||||
for key, inner_e_id in key_to_eid.items()
|
||||
if inner_e_id in event_map
|
||||
}
|
||||
|
|
|
@ -34,6 +34,7 @@ from synapse.handlers.device import DeviceHandler
from synapse.rest import admin
from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.storage.databases.main.events_worker import EventMetadata
from synapse.types import JsonDict, ReadReceipt
from synapse.util import Clock
@ -55,12 +56,15 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
            federation_transport_client=self.federation_transport_client,
        )

        hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock(  # type: ignore[method-assign]
        self.main_store = hs.get_datastores().main
        self.state_controller = hs.get_storage_controllers().state

        self.state_controller.get_current_hosts_in_room = AsyncMock(  # type: ignore[method-assign]
            return_value={"test", "host2"}
        )

        hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = (  # type: ignore[method-assign]
            hs.get_storage_controllers().state.get_current_hosts_in_room
        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = (  # type: ignore[method-assign]
            self.state_controller.get_current_hosts_in_room
        )

        return hs
@ -185,12 +189,15 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
            ],
        )

    def test_send_receipts_with_backoff(self) -> None:
        """Send two receipts in quick succession; the second should be flushed, but
        only after 20ms"""
    def test_send_receipts_with_backoff_small_room(self) -> None:
        """Read receipt in small rooms should not be delayed"""
        mock_send_transaction = self.federation_transport_client.send_transaction
        mock_send_transaction.return_value = {}

        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
            return_value={"test", "host2"}
        )

        sender = self.hs.get_federation_sender()
        receipt = ReadReceipt(
            "room_id",
@ -206,7 +213,104 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):

        # expect a call to send_transaction
        mock_send_transaction.assert_called_once()
        json_cb = mock_send_transaction.call_args[0][1]
        self._assert_edu_in_call(mock_send_transaction.call_args[0][1])

    def test_send_receipts_with_backoff_recent_event(self) -> None:
        """Read receipt for a recent message should not be delayed"""
        mock_send_transaction = self.federation_transport_client.send_transaction
        mock_send_transaction.return_value = {}

        # Pretend this is a big room
        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
            return_value={"test"} | {f"host{i}" for i in range(20)}
        )

        self.main_store.get_metadata_for_event = AsyncMock(
            return_value=EventMetadata(
                received_ts=self.clock.time_msec(),
                sender="@test:test",
            )
        )

        sender = self.hs.get_federation_sender()
        receipt = ReadReceipt(
            "room_id",
            "m.read",
            "user_id",
            ["event_id"],
            thread_id=None,
            data={"ts": 1234},
        )
        self.get_success(sender.send_read_receipt(receipt))

        self.pump()

        # expect a call to send_transaction for each host
        self.assertEqual(mock_send_transaction.call_count, 20)
        self._assert_edu_in_call(mock_send_transaction.call_args.args[1])

        mock_send_transaction.reset_mock()

    def test_send_receipts_with_backoff_sender(self) -> None:
        """Read receipt for a message should not be delayed to the sender, but
        is delayed to everyone else"""
        mock_send_transaction = self.federation_transport_client.send_transaction
        mock_send_transaction.return_value = {}

        # Pretend this is a big room
        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
            return_value={"test"} | {f"host{i}" for i in range(20)}
        )

        self.main_store.get_metadata_for_event = AsyncMock(
            return_value=EventMetadata(
                received_ts=self.clock.time_msec() - 5 * 60_000,
                sender="@test:host1",
            )
        )

        sender = self.hs.get_federation_sender()
        receipt = ReadReceipt(
            "room_id",
            "m.read",
            "user_id",
            ["event_id"],
            thread_id=None,
            data={"ts": 1234},
        )
        self.get_success(sender.send_read_receipt(receipt))

        self.pump()

        # First, expect a call to send_transaction for the sending host
        mock_send_transaction.assert_called()

        transaction = mock_send_transaction.call_args_list[0].args[0]
        self.assertEqual(transaction.destination, "host1")
        self._assert_edu_in_call(mock_send_transaction.call_args_list[0].args[1])

        # We also expect a call to one of the other hosts, as the first
        # destination to wake up.
        self.assertEqual(mock_send_transaction.call_count, 2)
        self._assert_edu_in_call(mock_send_transaction.call_args.args[1])

        mock_send_transaction.reset_mock()

        # We now expect to see 18 more transactions to the remaining hosts
        # periodically.
        for _ in range(18):
            self.reactor.advance(
                1.0
                / self.hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
            )

            mock_send_transaction.assert_called_once()
            self._assert_edu_in_call(mock_send_transaction.call_args.args[1])
            mock_send_transaction.reset_mock()

    def _assert_edu_in_call(self, json_cb: Callable[[], JsonDict]) -> None:
        """Assert that the given `json_cb` from a `send_transaction` has a
        receipt in it."""
        data = json_cb()
        self.assertEqual(
            data["edus"],
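Taken together, the three test_send_receipts_with_backoff_* cases above pin down when a read receipt is flushed straight away rather than queued behind the per-room rate limit. The federation sender itself is not part of this diff, so the rule below is only a sketch of the behaviour the tests assert; the threshold names and values are placeholders, and the pacing of the remaining destinations comes from the federation_rr_transactions_per_room_per_second rate-limiting setting used in the loop above.

# Placeholder thresholds; the real values live in the federation sender,
# which is not shown in this diff.
SMALL_ROOM_MAX_HOSTS = 10
RECENT_EVENT_WINDOW_MS = 60_000


def should_flush_receipt_immediately(
    num_hosts_in_room: int,
    event_age_ms: int,
    destination: str,
    event_sender_host: str,
) -> bool:
    """Sketch of the rule implied by the tests above."""
    if num_hosts_in_room <= SMALL_ROOM_MAX_HOSTS:
        # Small rooms: no delay (test_send_receipts_with_backoff_small_room).
        return True
    if event_age_ms < RECENT_EVENT_WINDOW_MS:
        # Receipts for recently received events: no delay
        # (test_send_receipts_with_backoff_recent_event).
        return True
    if destination == event_sender_host:
        # The event sender's own server is told immediately
        # (test_send_receipts_with_backoff_sender); every other host is
        # drip-fed at federation_rr_transactions_per_room_per_second.
        return True
    return False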
@ -226,46 +330,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
                }
            ],
        )
        mock_send_transaction.reset_mock()

        # send the second RR
        receipt = ReadReceipt(
            "room_id",
            "m.read",
            "user_id",
            ["other_id"],
            thread_id=None,
            data={"ts": 1234},
        )
        self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
        self.pump()
        mock_send_transaction.assert_not_called()

        self.reactor.advance(19)
        mock_send_transaction.assert_not_called()

        self.reactor.advance(10)
        mock_send_transaction.assert_called_once()
        json_cb = mock_send_transaction.call_args[0][1]
        data = json_cb()
        self.assertEqual(
            data["edus"],
            [
                {
                    "edu_type": EduTypes.RECEIPT,
                    "content": {
                        "room_id": {
                            "m.read": {
                                "user_id": {
                                    "event_ids": ["other_id"],
                                    "data": {"ts": 1234},
                                }
                            }
                        }
                    },
                }
            ],
        )


class FederationSenderPresenceTestCases(HomeserverTestCase):