Mirror of https://github.com/element-hq/synapse.git
Merge branch 'rav/fix_custom_ca' into rav/enable_tls_verification

commit 7603a706eb

28 changed files with 219 additions and 43 deletions

@@ -1 +1 @@
-Run black on synapse.crypto.keyring.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Rewrite store_server_verify_key to store several keys at once.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Remove unused VerifyKey.expired and .time_added fields.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Simplify Keyring.process_v2_response.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Store key validity time in the storage layer.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Refactor synapse.crypto.keyring to use a KeyFetcher interface.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Simplification to Keyring.wait_for_previous_lookups.
+Preparatory work for key-validity features.

changelog.d/5284.misc (new file)
@@ -0,0 +1 @@
+Improve sample config for monthly active user blocking.

@@ -1 +1 @@
-Refactor keyring.VerifyKeyRequest to use attr.s.
+Preparatory work for key-validity features.

@@ -1 +1 @@
-Rewrite get_server_verify_keys, again.
+Preparatory work for key-validity features.

changelog.d/5317.bugfix (new file)
@@ -0,0 +1 @@
+Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails.

@@ -1 +1 @@
-Rename VerifyKeyRequest.deferred field.
+Preparatory work for key-validity features.

@@ -1,2 +1 @@
-Various improvements to debug logging.
-
+Preparatory work for key-validity features.

changelog.d/5352.bugfix (new file)
@@ -0,0 +1 @@
+Fix room stats and presence background updates to correctly handle missing events.

changelog.d/5356.misc (new file)
@@ -0,0 +1 @@
+Preparatory work for key-validity features.

changelog.d/5357.doc (new file)
@@ -0,0 +1 @@
+Fix notes about ACME in the MSC1711 FAQ.

changelog.d/5360.feature (new file)
@@ -0,0 +1 @@
+Update /_matrix/client/versions to reference support for r0.5.0.

changelog.d/5362.bugfix (new file)
@@ -0,0 +1 @@
+Fix `federation_custom_ca_list` configuration option.

@@ -145,12 +145,11 @@ You can do this with a `.well-known` file as follows:

 1. Keep the SRV record in place - it is needed for backwards compatibility
    with Synapse 0.34 and earlier.

-2. Give synapse a certificate corresponding to the target domain
-   (`customer.example.net` in the above example). Currently Synapse's ACME
-   support [does not support
-   this](https://github.com/matrix-org/synapse/issues/4552), so you will have
-   to acquire a certificate yourself and give it to Synapse via
-   `tls_certificate_path` and `tls_private_key_path`.
+2. Give Synapse a certificate corresponding to the target domain
+   (`customer.example.net` in the above example). You can either use Synapse's
+   built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
+   the `acme` section), or acquire a certificate yourself and give it to
+   Synapse via `tls_certificate_path` and `tls_private_key_path`.

 3. Restart Synapse to ensure the new certificate is loaded.
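
A minimal sketch (not part of the diff; domain names are placeholders) of what the `.well-known` delegation described in the hunk above looks like from a client's point of view:

```python
# Fetch /.well-known/matrix/server and return the delegated host, falling
# back to the server name itself when no delegation file is published.
import json
from urllib.request import urlopen

def resolve_delegated_server(server_name):
    url = "https://%s/.well-known/matrix/server" % (server_name,)
    try:
        with urlopen(url, timeout=10) as resp:
            body = json.load(resp)
        # e.g. {"m.server": "customer.example.net:443"}
        return body.get("m.server", server_name)
    except Exception:
        # No .well-known file: fall back to SRV/direct resolution.
        return server_name

if __name__ == "__main__":
    print(resolve_delegated_server("example.com"))
```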

@@ -261,6 +261,22 @@ listeners:

 # Monthly Active User Blocking
 #
+# Used in cases where the admin or server owner wants to limit the
+# number of monthly active users.
+#
+# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
+# enabled and a limit is reached the server returns a 'ResourceLimitError'
+# with error type Codes.RESOURCE_LIMIT_EXCEEDED
+#
+# 'max_mau_value' is the hard limit of monthly active users above which
+# the server will start blocking user actions.
+#
+# 'mau_trial_days' is a means to add a grace period for active users. It
+# means that users must be active for this number of days before they
+# can be considered active and guards against the case where lots of users
+# sign up in a short space of time never to return after their initial
+# session.
+#
 #limit_usage_by_mau: False
 #max_mau_value: 50
 #mau_trial_days: 2
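
A hedged sketch (illustrative names, not Synapse's actual implementation) of how the `mau_trial_days` grace period described in the new comments behaves: a user only counts towards the monthly-active-user limit once their account is older than the trial period.

```python
from datetime import datetime, timedelta

MAU_TRIAL_DAYS = 2  # mirrors the sample config's mau_trial_days

def counts_towards_mau(account_created_at, now=None):
    """Return True if the user is past the trial period and should be
    counted as a monthly active user."""
    now = now or datetime.utcnow()
    return now - account_created_at >= timedelta(days=MAU_TRIAL_DAYS)

# A user who signed up an hour ago does not count yet; one from last week does.
assert not counts_towards_mau(datetime.utcnow() - timedelta(hours=1))
assert counts_towards_mau(datetime.utcnow() - timedelta(days=7))
```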

@@ -585,6 +585,22 @@ class ServerConfig(Config):

 # Monthly Active User Blocking
 #
+# Used in cases where the admin or server owner wants to limit the
+# number of monthly active users.
+#
+# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
+# enabled and a limit is reached the server returns a 'ResourceLimitError'
+# with error type Codes.RESOURCE_LIMIT_EXCEEDED
+#
+# 'max_mau_value' is the hard limit of monthly active users above which
+# the server will start blocking user actions.
+#
+# 'mau_trial_days' is a means to add a grace period for active users. It
+# means that users must be active for this number of days before they
+# can be considered active and guards against the case where lots of users
+# sign up in a short space of time never to return after their initial
+# session.
+#
 #limit_usage_by_mau: False
 #max_mau_value: 50
 #mau_trial_days: 2

@@ -107,7 +107,7 @@ class TlsConfig(Config):
         certs = []
         for ca_file in custom_ca_list:
             logger.debug("Reading custom CA certificate file: %s", ca_file)
-            content = self.read_file(ca_file)
+            content = self.read_file(ca_file, "federation_custom_ca_list")

             # Parse the CA certificates
             try:
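
A minimal sketch (assumed, not Synapse's exact `Config.read_file`) of why the second argument in the hunk above matters: the reader can then name the offending config option in its error message instead of failing opaquely.

```python
def read_file(file_path, config_name):
    """Read file_path, attributing any failure to config_name."""
    try:
        with open(file_path) as f:
            return f.read()
    except OSError as e:
        # The error now tells the admin which option pointed at the bad file.
        raise RuntimeError(
            "Error accessing file %r (config for %s): %s"
            % (file_path, config_name, e)
        )
```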

@@ -35,6 +35,7 @@ from synapse.api.errors import (
     CodeMessageException,
     FederationDeniedError,
     FederationError,
+    RequestSendFailed,
     StoreError,
     SynapseError,
 )

@@ -2027,9 +2028,21 @@ class FederationHandler(BaseHandler):
         """
         room_version = yield self.store.get_room_version(event.room_id)

-        yield self._update_auth_events_and_context_for_auth(
-            origin, event, context, auth_events
-        )
+        try:
+            yield self._update_auth_events_and_context_for_auth(
+                origin, event, context, auth_events
+            )
+        except Exception:
+            # We don't really mind if the above fails, so let's not fail
+            # processing if it does. However, it really shouldn't fail so
+            # let's still log as an exception since we'll still want to fix
+            # any bugs.
+            logger.exception(
+                "Failed to double check auth events for %s with remote. "
+                "Ignoring failure and continuing processing of event.",
+                event.event_id,
+            )

         try:
             self.auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:

@@ -2042,6 +2055,15 @@ class FederationHandler(BaseHandler):
     ):
         """Helper for do_auth. See there for docs.

+        Checks whether a given event has the expected auth events. If it
+        doesn't then we talk to the remote server to compare state to see if
+        we can come to a consensus (e.g. if one server missed some valid
+        state).
+
+        This attempts to resolve any potential divergence of state between
+        servers, but is not essential and so failures should not block further
+        processing of the event.
+
         Args:
             origin (str):
             event (synapse.events.EventBase):

@@ -2088,9 +2110,15 @@ class FederationHandler(BaseHandler):
                 missing_auth,
             )
             try:
-                remote_auth_chain = yield self.federation_client.get_event_auth(
-                    origin, event.room_id, event.event_id
-                )
+                try:
+                    remote_auth_chain = yield self.federation_client.get_event_auth(
+                        origin, event.room_id, event.event_id
+                    )
+                except RequestSendFailed as e:
+                    # The other side isn't around or doesn't implement the
+                    # endpoint, so let's just bail out.
+                    logger.info("Failed to get event auth from remote: %s", e)
+                    return

                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in remote_auth_chain]

@@ -2236,12 +2264,18 @@ class FederationHandler(BaseHandler):

             try:
                 # 2. Get remote difference.
-                result = yield self.federation_client.query_auth(
-                    origin,
-                    event.room_id,
-                    event.event_id,
-                    local_auth_chain,
-                )
+                try:
+                    result = yield self.federation_client.query_auth(
+                        origin,
+                        event.room_id,
+                        event.event_id,
+                        local_auth_chain,
+                    )
+                except RequestSendFailed as e:
+                    # The other side isn't around or doesn't implement the
+                    # endpoint, so let's just bail out.
+                    logger.info("Failed to query auth from remote: %s", e)
+                    return

                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in result["auth_chain"]]
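
Both hunks above apply the same pattern, condensed here as an illustrative sketch (names are placeholders; `RequestSendFailed` stands in for `synapse.api.errors.RequestSendFailed`): wrap only the outbound federation call, bail out quietly when the remote is unreachable, and let genuinely unexpected errors propagate.

```python
import logging

logger = logging.getLogger(__name__)

class RequestSendFailed(Exception):
    """Raised when a request to a remote server could not be sent."""

def double_check_with_remote(fetch_remote, *args):
    try:
        remote_result = fetch_remote(*args)
    except RequestSendFailed as e:
        # The other side isn't around or doesn't implement the endpoint,
        # so this best-effort check is simply skipped.
        logger.info("Failed to reach remote: %s", e)
        return None
    return remote_result
```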

@@ -828,14 +828,17 @@ class PresenceHandler(object):
                 # joins.
                 continue

-            event = yield self.store.get_event(event_id)
-            if event.content.get("membership") != Membership.JOIN:
+            event = yield self.store.get_event(event_id, allow_none=True)
+            if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue

             if prev_event_id:
-                prev_event = yield self.store.get_event(prev_event_id)
-                if prev_event.content.get("membership") == Membership.JOIN:
+                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+                if (
+                    prev_event
+                    and prev_event.content.get("membership") == Membership.JOIN
+                ):
                     # Ignore changes to join events.
                     continue

@@ -115,6 +115,7 @@ class StatsHandler(StateDeltasHandler):
             event_id = delta["event_id"]
             stream_id = delta["stream_id"]
             prev_event_id = delta["prev_event_id"]
+            stream_pos = delta["stream_id"]

             logger.debug("Handling: %r %r, %s", typ, state_key, event_id)

@@ -136,10 +137,15 @@ class StatsHandler(StateDeltasHandler):
             event_content = {}

             if event_id is not None:
-                event_content = (yield self.store.get_event(event_id)).content or {}
+                event = yield self.store.get_event(event_id, allow_none=True)
+                if event:
+                    event_content = event.content or {}
+
+            # We use stream_pos here rather than fetch by event_id as event_id
+            # may be None
+            now = yield self.store.get_received_ts_by_stream_pos(stream_pos)

             # quantise time to the nearest bucket
-            now = yield self.store.get_received_ts(event_id)
             now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size

             if typ == EventTypes.Member:

@@ -149,9 +155,11 @@ class StatsHandler(StateDeltasHandler):
                 # compare them.
                 prev_event_content = {}
                 if prev_event_id is not None:
-                    prev_event_content = (
-                        yield self.store.get_event(prev_event_id)
-                    ).content
+                    prev_event = yield self.store.get_event(
+                        prev_event_id, allow_none=True,
+                    )
+                    if prev_event:
+                        prev_event_content = prev_event.content

                 membership = event_content.get("membership", Membership.LEAVE)
                 prev_membership = prev_event_content.get("membership", Membership.LEAVE)
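
An illustrative reduction (assumed helper names, not Synapse code) of the missing-event handling added in the presence and stats hunks above: `get_event` with `allow_none=True` returns `None` instead of raising, and callers fall back to a sensible default such as `Membership.LEAVE`.

```python
LEAVE = "leave"

def membership_of(event):
    """Return the membership of an event, treating a missing event
    (e.g. one that was redacted or purged) as LEAVE."""
    if event is None:
        return LEAVE
    return event.get("content", {}).get("membership", LEAVE)

assert membership_of(None) == LEAVE
assert membership_of({"content": {"membership": "join"}}) == "join"
```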

@@ -39,6 +39,7 @@ class VersionsRestServlet(RestServlet):
                     "r0.2.0",
                     "r0.3.0",
                     "r0.4.0",
+                    "r0.5.0",
                 ],
                 # as per MSC1497:
                 "unstable_features": {
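
A hedged sketch of how a client might confirm the newly advertised r0.5.0 support; the homeserver URL is a placeholder.

```python
import json
from urllib.request import urlopen

def supports_r0_5_0(homeserver_url):
    # /_matrix/client/versions is the endpoint the hunk above extends.
    with urlopen(homeserver_url + "/_matrix/client/versions", timeout=10) as resp:
        versions = json.load(resp).get("versions", [])
    return "r0.5.0" in versions

if __name__ == "__main__":
    print(supports_r0_5_0("https://matrix.example.org"))
```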

@@ -78,6 +78,43 @@ class EventsWorkerStore(SQLBaseStore):
             desc="get_received_ts",
         )

+    def get_received_ts_by_stream_pos(self, stream_ordering):
+        """Given a stream ordering get an approximate timestamp of when it
+        happened.
+
+        This is done by simply taking the received ts of the first event that
+        has a stream ordering greater than or equal to the given stream pos.
+        If none exists returns the current time, on the assumption that it must
+        have happened recently.
+
+        Args:
+            stream_ordering (int)
+
+        Returns:
+            Deferred[int]
+        """
+
+        def _get_approximate_received_ts_txn(txn):
+            sql = """
+                SELECT received_ts FROM events
+                WHERE stream_ordering >= ?
+                LIMIT 1
+            """
+
+            txn.execute(sql, (stream_ordering,))
+            row = txn.fetchone()
+            if row and row[0]:
+                ts = row[0]
+            else:
+                ts = self.clock.time_msec()
+
+            return ts
+
+        return self.runInteraction(
+            "get_approximate_received_ts",
+            _get_approximate_received_ts_txn,
+        )
+
     @defer.inlineCallbacks
     def get_event(
         self,
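
A self-contained illustration (using sqlite3 purely for demonstration; an `ORDER BY` is added for determinism) of the approximation the new store method performs: take the `received_ts` of the first event at or after the given stream ordering, else fall back to "now".

```python
import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (stream_ordering INTEGER, received_ts INTEGER)")
conn.executemany(
    "INSERT INTO events VALUES (?, ?)",
    [(50, 1000), (60, 2000), (100, 3000)],
)

def approximate_received_ts(stream_ordering):
    row = conn.execute(
        "SELECT received_ts FROM events WHERE stream_ordering >= ? "
        "ORDER BY stream_ordering LIMIT 1",
        (stream_ordering,),
    ).fetchone()
    # Fall back to the current time if nothing is that recent.
    return row[0] if row and row[0] else int(time.time() * 1000)

assert approximate_received_ts(55) == 2000   # first event at ordering >= 55
assert approximate_received_ts(101) > 3000   # nothing newer: "now"
```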

@@ -204,7 +204,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
             "a2": {"membership": "not a real thing"},
         }

-        def get_event(event_id):
+        def get_event(event_id, allow_none=True):
             m = Mock()
             m.content = events[event_id]
             d = defer.Deferred()

@@ -224,7 +224,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
                 "room_id": "room",
                 "event_id": "a1",
                 "prev_event_id": "a2",
-                "stream_id": "bleb",
+                "stream_id": 60,
             }
         ]

@@ -241,7 +241,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
                 "room_id": "room",
                 "event_id": "a2",
                 "prev_event_id": "a1",
-                "stream_id": "bleb",
+                "stream_id": 100,
             }
         ]

@@ -249,3 +249,59 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         self.assertEqual(
             f.value.args[0], "'not a real thing' is not a valid membership"
         )
+
+    def test_redacted_prev_event(self):
+        """
+        If the prev_event does not exist, then it is assumed to be a LEAVE.
+        """
+        u1 = self.register_user("u1", "pass")
+        u1_token = self.login("u1", "pass")
+
+        room_1 = self.helper.create_room_as(u1, tok=u1_token)
+
+        # Do the initial population of the user directory via the background update
+        self._add_background_updates()
+
+        while not self.get_success(self.store.has_completed_background_updates()):
+            self.get_success(self.store.do_next_background_update(100), by=0.1)
+
+        events = {
+            "a1": None,
+            "a2": {"membership": Membership.JOIN},
+        }
+
+        def get_event(event_id, allow_none=True):
+            if events.get(event_id):
+                m = Mock()
+                m.content = events[event_id]
+            else:
+                m = None
+            d = defer.Deferred()
+            self.reactor.callLater(0.0, d.callback, m)
+            return d
+
+        def get_received_ts(event_id):
+            return defer.succeed(1)
+
+        self.store.get_received_ts = get_received_ts
+        self.store.get_event = get_event
+
+        deltas = [
+            {
+                "type": EventTypes.Member,
+                "state_key": "some_user:test",
+                "room_id": room_1,
+                "event_id": "a2",
+                "prev_event_id": "a1",
+                "stream_id": 100,
+            }
+        ]
+
+        # Handle our fake deltas, which have a user going from LEAVE -> JOIN.
+        self.get_success(self.handler._handle_deltas(deltas))
+
+        # One delta, with two joined members -- the room creator, and our fake
+        # user.
+        r = self.get_success(self.store.get_deltas_for_room(room_1, 0))
+        self.assertEqual(len(r), 1)
+        self.assertEqual(r[0]["joined_members"], 2)