Move background update handling out of store

Erik Johnston 2019-12-04 15:09:36 +00:00
parent 8863624f78
commit 4a33a6dd19
27 changed files with 281 additions and 200 deletions
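The background-update machinery previously lived on `BackgroundUpdateStore`, a `SQLBaseStore` subclass that every store with registered updates had to inherit from. This commit turns it into a standalone `BackgroundUpdater` object that `Database` constructs and exposes as `db.updates`, so the store classes shrink back to plain `SQLBaseStore` subclasses and every call site goes through the shared updater instead of `self`. A minimal before/after sketch of the call-site change, using the search store from this diff as the example:

    # Before: the machinery arrived by inheritance.
    class SearchBackgroundUpdateStore(BackgroundUpdateStore):
        def __init__(self, db_conn, hs):
            super(SearchBackgroundUpdateStore, self).__init__(db_conn, hs)
            self.register_background_update_handler(
                self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
            )

    # After: the machinery is an attribute of the shared Database object.
    class SearchBackgroundUpdateStore(SQLBaseStore):
        def __init__(self, db_conn, hs):
            super(SearchBackgroundUpdateStore, self).__init__(db_conn, hs)
            self.db.updates.register_background_update_handler(
                self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
            )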

View file

@@ -436,7 +436,7 @@ def setup(config_options):
         _base.start(hs, config.listeners)
         hs.get_pusherpool().start()
-        hs.get_datastore().start_doing_background_updates()
+        hs.get_datastore().db.updates.start_doing_background_updates()
     except Exception:
         # Print the exception and bail out.
         print("Error during startup:", file=sys.stderr)

View file

@@ -402,7 +402,7 @@ class PreviewUrlResource(DirectServeResource):
         logger.info("Running url preview cache expiry")

-        if not (yield self.store.has_completed_background_updates()):
+        if not (yield self.store.db.updates.has_completed_background_updates()):
             logger.info("Still running DB updates; skipping expiry")
             return

View file

@@ -22,7 +22,6 @@ from twisted.internet import defer
 from synapse.metrics.background_process_metrics import run_as_background_process

 from . import engines
-from ._base import SQLBaseStore

 logger = logging.getLogger(__name__)
@@ -74,7 +73,7 @@ class BackgroundUpdatePerformance(object):
             return float(self.total_item_count) / float(self.total_duration_ms)

-class BackgroundUpdateStore(SQLBaseStore):
+class BackgroundUpdater(object):
     """ Background updates are updates to the database that run in the
     background. Each update processes a batch of data at once. We attempt to
     limit the impact of each update by monitoring how long each batch takes to
@@ -86,8 +85,10 @@ class BackgroundUpdateStore(SQLBaseStore):
     BACKGROUND_UPDATE_INTERVAL_MS = 1000
     BACKGROUND_UPDATE_DURATION_MS = 100

-    def __init__(self, db_conn, hs):
-        super(BackgroundUpdateStore, self).__init__(db_conn, hs)
+    def __init__(self, hs, database):
+        self._clock = hs.get_clock()
+        self.db = database
+
         self._background_update_performance = {}
         self._background_update_queue = []
         self._background_update_handlers = {}
@@ -101,9 +102,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         logger.info("Starting background schema updates")
         while True:
             if sleep:
-                yield self.hs.get_clock().sleep(
-                    self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0
-                )
+                yield self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)

             try:
                 result = yield self.do_next_background_update(
@@ -380,7 +379,7 @@ class BackgroundUpdateStore(SQLBaseStore):
             logger.debug("[SQL] %s", sql)
             c.execute(sql)

-        if isinstance(self.database_engine, engines.PostgresEngine):
+        if isinstance(self.db.database_engine, engines.PostgresEngine):
             runner = create_index_psql
         elif psql_only:
             runner = None
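
Pulling the pieces of this hunk together, the new class starts out roughly like this (a condensed sketch assembled from the lines shown above, not the full file):

    class BackgroundUpdater(object):
        """Runs registered background updates against the given Database,
        one batch at a time, pacing itself via the homeserver clock."""

        BACKGROUND_UPDATE_INTERVAL_MS = 1000
        BACKGROUND_UPDATE_DURATION_MS = 100

        def __init__(self, hs, database):
            self._clock = hs.get_clock()
            self.db = database  # the Database the updates run against

            self._background_update_performance = {}
            self._background_update_queue = []
            self._background_update_handlers = {}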

View file

@@ -20,7 +20,7 @@ from six import iteritems
 from twisted.internet import defer

 from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage import background_updates
+from synapse.storage._base import SQLBaseStore
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.caches.descriptors import Cache
@@ -32,41 +32,41 @@ logger = logging.getLogger(__name__)
 LAST_SEEN_GRANULARITY = 120 * 1000

-class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
+class ClientIpBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(ClientIpBackgroundUpdateStore, self).__init__(db_conn, hs)

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "user_ips_device_index",
             index_name="user_ips_device_id",
             table="user_ips",
             columns=["user_id", "device_id", "last_seen"],
         )

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "user_ips_last_seen_index",
             index_name="user_ips_last_seen",
             table="user_ips",
             columns=["user_id", "last_seen"],
         )

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "user_ips_last_seen_only_index",
             index_name="user_ips_last_seen_only",
             table="user_ips",
             columns=["last_seen"],
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "user_ips_analyze", self._analyze_user_ip
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "user_ips_remove_dupes", self._remove_user_ip_dupes
         )

         # Register a unique index
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "user_ips_device_unique_index",
             index_name="user_ips_user_token_ip_unique_index",
             table="user_ips",
@@ -75,12 +75,12 @@ class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
         )

         # Drop the old non-unique index
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "user_ips_drop_nonunique_index", self._remove_user_ip_nonunique
         )

         # Update the last seen info in devices.
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "devices_last_seen", self._devices_last_seen_update
         )
@@ -92,7 +92,7 @@ class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
             txn.close()

         yield self.db.runWithConnection(f)
-        yield self._end_background_update("user_ips_drop_nonunique_index")
+        yield self.db.updates._end_background_update("user_ips_drop_nonunique_index")
         return 1

     @defer.inlineCallbacks
@@ -108,7 +108,7 @@ class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
         yield self.db.runInteraction("user_ips_analyze", user_ips_analyze)

-        yield self._end_background_update("user_ips_analyze")
+        yield self.db.updates._end_background_update("user_ips_analyze")

         return 1
@@ -271,14 +271,14 @@ class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
                     (user_id, access_token, ip, device_id, user_agent, last_seen),
                 )

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
             )

         yield self.db.runInteraction("user_ips_dups_remove", remove)

         if last:
-            yield self._end_background_update("user_ips_remove_dupes")
+            yield self.db.updates._end_background_update("user_ips_remove_dupes")

         return batch_size
@@ -344,7 +344,7 @@ class ClientIpBackgroundUpdateStore(background_updates.BackgroundUpdateStore):
             txn.execute_batch(sql, rows)

             _, _, _, user_id, device_id = rows[-1]
-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn,
                 "devices_last_seen",
                 {"last_user_id": user_id, "last_device_id": device_id},
@@ -357,7 +357,7 @@
         )

         if not updated:
-            yield self._end_background_update("devices_last_seen")
+            yield self.db.updates._end_background_update("devices_last_seen")

         return updated
@@ -546,7 +546,9 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
             # Nothing to do
             return

-        if not await self.has_completed_background_update("devices_last_seen"):
+        if not await self.db.updates.has_completed_background_update(
+            "devices_last_seen"
+        ):
             # Only start pruning if we have finished populating the devices
             # last seen info.
             return
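
The handler contract itself is untouched by the move: a handler receives the persisted progress dict plus a batch size, records progress inside the transaction with `_background_update_progress_txn`, and calls `_end_background_update` once nothing is left to do; only the object those methods hang off has changed. A hypothetical minimal handler in the new style (`my_update`, its `last_seen` progress key, and the query are illustrative, not part of this commit):

    @defer.inlineCallbacks
    def _my_update(self, progress, batch_size):
        # Resume from the point persisted by the previous batch.
        last_seen = progress.get("last_seen", 0)

        def do_batch(txn):
            txn.execute(
                "SELECT last_seen FROM user_ips"
                " WHERE last_seen > ? ORDER BY last_seen LIMIT ?",
                (last_seen, batch_size),
            )
            rows = txn.fetchall()
            if rows:
                # Persist progress in the same transaction as the work.
                self.db.updates._background_update_progress_txn(
                    txn, "my_update", {"last_seen": rows[-1][0]}
                )
            return len(rows)

        processed = yield self.db.runInteraction("my_update", do_batch)
        if not processed:
            # Nothing left: mark the update as complete.
            yield self.db.updates._end_background_update("my_update")
        return processed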

View file

@@ -21,7 +21,6 @@ from twisted.internet import defer
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.util.caches.expiringcache import ExpiringCache

 logger = logging.getLogger(__name__)
@@ -208,20 +207,20 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         )

-class DeviceInboxBackgroundUpdateStore(BackgroundUpdateStore):
+class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
     DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"

     def __init__(self, db_conn, hs):
         super(DeviceInboxBackgroundUpdateStore, self).__init__(db_conn, hs)

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "device_inbox_stream_index",
             index_name="device_inbox_stream_id_user_id",
             table="device_inbox",
             columns=["stream_id", "user_id"],
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
         )
@@ -234,7 +233,7 @@ class DeviceInboxBackgroundUpdateStore(BackgroundUpdateStore):
         yield self.db.runWithConnection(reindex_txn)

-        yield self._end_background_update(self.DEVICE_INBOX_STREAM_ID)
+        yield self.db.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)

         return 1

View file

@@ -31,7 +31,6 @@ from synapse.logging.opentracing import (
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.types import get_verify_key_from_cross_signing_key
 from synapse.util import batch_iter
 from synapse.util.caches.descriptors import (
@@ -642,11 +641,11 @@ class DeviceWorkerStore(SQLBaseStore):
         return results

-class DeviceBackgroundUpdateStore(BackgroundUpdateStore):
+class DeviceBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(DeviceBackgroundUpdateStore, self).__init__(db_conn, hs)

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "device_lists_stream_idx",
             index_name="device_lists_stream_user_id",
             table="device_lists_stream",
@@ -654,7 +653,7 @@ class DeviceBackgroundUpdateStore(BackgroundUpdateStore):
         )

         # create a unique index on device_lists_remote_cache
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "device_lists_remote_cache_unique_idx",
             index_name="device_lists_remote_cache_unique_id",
             table="device_lists_remote_cache",
@@ -663,7 +662,7 @@ class DeviceBackgroundUpdateStore(BackgroundUpdateStore):
         )

         # And one on device_lists_remote_extremeties
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "device_lists_remote_extremeties_unique_idx",
             index_name="device_lists_remote_extremeties_unique_idx",
             table="device_lists_remote_extremeties",
@@ -672,7 +671,7 @@ class DeviceBackgroundUpdateStore(BackgroundUpdateStore):
         )

         # once they complete, we can remove the old non-unique indexes.
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
             self._drop_device_list_streams_non_unique_indexes,
         )
@@ -686,7 +685,9 @@ class DeviceBackgroundUpdateStore(BackgroundUpdateStore):
             txn.close()

         yield self.db.runWithConnection(f)
-        yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)
+        yield self.db.updates._end_background_update(
+            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES
+        )
         return 1

View file

@@ -494,7 +494,7 @@ class EventFederationStore(EventFederationWorkerStore):
     def __init__(self, db_conn, hs):
         super(EventFederationStore, self).__init__(db_conn, hs)

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth
         )
@@ -654,7 +654,7 @@ class EventFederationStore(EventFederationWorkerStore):
                 "max_stream_id_exclusive": min_stream_id,
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.EVENT_AUTH_STATE_ONLY, new_progress
             )
@@ -665,6 +665,6 @@
         )

         if not result:
-            yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY)
+            yield self.db.updates._end_background_update(self.EVENT_AUTH_STATE_ONLY)

         return batch_size

View file

@@ -614,14 +614,14 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
     def __init__(self, db_conn, hs):
         super(EventPushActionsStore, self).__init__(db_conn, hs)

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             self.EPA_HIGHLIGHT_INDEX,
             index_name="event_push_actions_u_highlight",
             table="event_push_actions",
             columns=["user_id", "stream_ordering"],
         )

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "event_push_actions_highlights_index",
             index_name="event_push_actions_highlights_index",
             table="event_push_actions",

View file

@@ -38,7 +38,6 @@ from synapse.logging.utils import log_function
 from synapse.metrics import BucketCollector
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.data_stores.main.event_federation import EventFederationStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.state import StateGroupWorkerStore
@@ -94,10 +93,7 @@ def _retry_on_integrity_error(func):
 # inherits from EventFederationStore so that we can call _update_backward_extremities
 # and _handle_mult_prev_events (though arguably those could both be moved in here)
 class EventsStore(
-    StateGroupWorkerStore,
-    EventFederationStore,
-    EventsWorkerStore,
-    BackgroundUpdateStore,
+    StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
 ):
     def __init__(self, db_conn, hs):
         super(EventsStore, self).__init__(db_conn, hs)

View file

@@ -22,13 +22,12 @@ from canonicaljson import json
 from twisted.internet import defer

 from synapse.api.constants import EventContentFields
-from synapse.storage._base import make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause

 logger = logging.getLogger(__name__)

-class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
+class EventsBackgroundUpdatesStore(SQLBaseStore):

     EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
@@ -37,15 +36,15 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
     def __init__(self, db_conn, hs):
         super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs)

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
             self._background_reindex_fields_sender,
         )

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "event_contains_url_index",
             index_name="event_contains_url_index",
             table="events",
@@ -56,7 +55,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         # an event_id index on event_search is useful for the purge_history
         # api. Plus it means we get to enforce some integrity with a UNIQUE
         # clause
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "event_search_event_id_idx",
             index_name="event_search_event_id_idx",
             table="event_search",
@@ -65,16 +64,16 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             psql_only=True,
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "redactions_received_ts", self._redactions_received_ts
         )

         # This index gets deleted in `event_fix_redactions_bytes` update
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "event_fix_redactions_bytes_create_index",
             index_name="redactions_censored_redacts",
             table="redactions",
@@ -82,11 +81,11 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             where_clause="have_censored",
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "event_fix_redactions_bytes", self._event_fix_redactions_bytes
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "event_store_labels", self._event_store_labels
         )
@@ -145,7 +144,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 "rows_inserted": rows_inserted + len(rows),
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
             )
@@ -156,7 +155,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )

         if not result:
-            yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
+            yield self.db.updates._end_background_update(
+                self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
+            )

         return result
@@ -222,7 +223,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 "rows_inserted": rows_inserted + len(rows_to_update),
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
             )
@@ -233,7 +234,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )

         if not result:
-            yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
+            yield self.db.updates._end_background_update(
+                self.EVENT_ORIGIN_SERVER_TS_NAME
+            )

         return result
@@ -411,7 +414,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )

         if not num_handled:
-            yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES)
+            yield self.db.updates._end_background_update(
+                self.DELETE_SOFT_FAILED_EXTREMITIES
+            )

             def _drop_table_txn(txn):
                 txn.execute("DROP TABLE _extremities_to_check")
@@ -464,7 +469,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, "redactions_received_ts", {"last_event_id": upper_event_id}
             )
@@ -475,7 +480,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )

         if not count:
-            yield self._end_background_update("redactions_received_ts")
+            yield self.db.updates._end_background_update("redactions_received_ts")

         return count
@@ -505,7 +510,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             "_event_fix_redactions_bytes", _event_fix_redactions_bytes_txn
         )

-        yield self._end_background_update("event_fix_redactions_bytes")
+        yield self.db.updates._end_background_update("event_fix_redactions_bytes")

         return 1
@@ -559,7 +564,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 nbrows += 1
                 last_row_event_id = event_id

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, "event_store_labels", {"last_event_id": last_row_event_id}
             )
@@ -570,6 +575,6 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )

         if not num_rows:
-            yield self._end_background_update("event_store_labels")
+            yield self.db.updates._end_background_update("event_store_labels")

         return num_rows

View file

@@ -12,14 +12,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage._base import SQLBaseStore

-class MediaRepositoryBackgroundUpdateStore(BackgroundUpdateStore):
+class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(MediaRepositoryBackgroundUpdateStore, self).__init__(db_conn, hs)

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             update_name="local_media_repository_url_idx",
             index_name="local_media_repository_url_idx",
             table="local_media_repository",

View file

@@ -26,7 +26,6 @@ from twisted.internet.defer import Deferred
 from synapse.api.constants import UserTypes
 from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage import background_updates
 from synapse.storage._base import SQLBaseStore
 from synapse.types import UserID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
@@ -794,23 +793,21 @@ class RegistrationWorkerStore(SQLBaseStore):
         )

-class RegistrationBackgroundUpdateStore(
-    RegistrationWorkerStore, background_updates.BackgroundUpdateStore
-):
+class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
     def __init__(self, db_conn, hs):
         super(RegistrationBackgroundUpdateStore, self).__init__(db_conn, hs)

         self.clock = hs.get_clock()
         self.config = hs.config

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "access_tokens_device_index",
             index_name="access_tokens_device_id",
             table="access_tokens",
             columns=["user_id", "device_id"],
         )

-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "users_creation_ts",
             index_name="users_creation_ts",
             table="users",
@@ -820,13 +817,13 @@ class RegistrationBackgroundUpdateStore(
         # we no longer use refresh tokens, but it's possible that some people
         # might have a background update queued to build this index. Just
         # clear the background update.
-        self.register_noop_background_update("refresh_tokens_device_index")
+        self.db.updates.register_noop_background_update("refresh_tokens_device_index")

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "user_threepids_grandfather", self._bg_user_threepids_grandfather
         )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "users_set_deactivated_flag", self._background_update_set_deactivated_flag
         )
@@ -873,7 +870,7 @@ class RegistrationBackgroundUpdateStore(
             logger.info("Marked %d rows as deactivated", rows_processed_nb)

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]}
             )
@@ -887,7 +884,7 @@ class RegistrationBackgroundUpdateStore(
         )

         if end:
-            yield self._end_background_update("users_set_deactivated_flag")
+            yield self.db.updates._end_background_update("users_set_deactivated_flag")

         return nb_processed
@@ -917,7 +914,7 @@ class RegistrationBackgroundUpdateStore(
             "_bg_user_threepids_grandfather", _bg_user_threepids_grandfather_txn
         )

-        yield self._end_background_update("user_threepids_grandfather")
+        yield self.db.updates._end_background_update("user_threepids_grandfather")

         return 1
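
`register_noop_background_update` (seen here for `refresh_tokens_device_index`, and again below in the stats store) covers updates that are obsolete but may still sit queued in the `background_updates` table on older deployments; registering a no-op lets those rows drain instead of sitting in the queue with no handler. After this commit that registration, too, simply moves onto the shared updater:

    # In a store's __init__; the name must match the queued update row.
    self.db.updates.register_noop_background_update("refresh_tokens_device_index")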

View file

@@ -28,7 +28,6 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.data_stores.main.search import SearchStore
 from synapse.types import ThirdPartyInstanceID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
@@ -361,13 +360,13 @@ class RoomWorkerStore(SQLBaseStore):
         defer.returnValue(row)

-class RoomBackgroundUpdateStore(BackgroundUpdateStore):
+class RoomBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(RoomBackgroundUpdateStore, self).__init__(db_conn, hs)

         self.config = hs.config

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "insert_room_retention", self._background_insert_retention,
         )
@@ -421,7 +420,7 @@ class RoomBackgroundUpdateStore(BackgroundUpdateStore):
                 logger.info("Inserted %d rows into room_retention", len(rows))

-                self._background_update_progress_txn(
+                self.db.updates._background_update_progress_txn(
                     txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]}
                 )
@@ -435,7 +434,7 @@ class RoomBackgroundUpdateStore(BackgroundUpdateStore):
         )

         if end:
-            yield self._end_background_update("insert_room_retention")
+            yield self.db.updates._end_background_update("insert_room_retention")

         defer.returnValue(batch_size)

View file

@@ -26,8 +26,11 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage._base import LoggingTransaction, make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage._base import (
+    LoggingTransaction,
+    SQLBaseStore,
+    make_in_list_sql_clause,
+)
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import Sqlite3Engine
 from synapse.storage.roommember import (
@@ -831,17 +834,17 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         )

-class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
+class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(RoomMemberBackgroundUpdateStore, self).__init__(db_conn, hs)
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
             self._background_current_state_membership,
         )
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             "room_membership_forgotten_idx",
             index_name="room_memberships_user_room_forgotten",
             table="room_memberships",
@@ -909,7 +912,7 @@ class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
                 "max_stream_id_exclusive": min_stream_id,
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, _MEMBERSHIP_PROFILE_UPDATE_NAME, progress
             )
@@ -920,7 +923,9 @@ class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
         )

         if not result:
-            yield self._end_background_update(_MEMBERSHIP_PROFILE_UPDATE_NAME)
+            yield self.db.updates._end_background_update(
+                _MEMBERSHIP_PROFILE_UPDATE_NAME
+            )

         return result
@@ -959,7 +964,7 @@ class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
             last_processed_room = next_room

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn,
                 _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
                 {"last_processed_room": last_processed_room},
@@ -978,7 +983,9 @@ class RoomMemberBackgroundUpdateStore(BackgroundUpdateStore):
         )

         if finished:
-            yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME)
+            yield self.db.updates._end_background_update(
+                _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME
+            )

         return row_count

View file

@@ -24,8 +24,7 @@ from canonicaljson import json
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
-from synapse.storage._base import make_in_list_sql_clause
-from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine

 logger = logging.getLogger(__name__)
@@ -36,7 +35,7 @@ SearchEntry = namedtuple(
 )

-class SearchBackgroundUpdateStore(BackgroundUpdateStore):
+class SearchBackgroundUpdateStore(SQLBaseStore):

     EVENT_SEARCH_UPDATE_NAME = "event_search"
     EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order"
@@ -49,10 +48,10 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
         if not hs.config.enable_search:
             return

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order
         )
@@ -61,9 +60,11 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
         # a GIN index. However, it's possible that some people might still have
         # the background update queued, so we register a handler to clear the
         # background update.
-        self.register_noop_background_update(self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME)
+        self.db.updates.register_noop_background_update(
+            self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME
+        )

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
         )
@@ -153,7 +154,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
                 "rows_inserted": rows_inserted + len(event_search_rows),
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.EVENT_SEARCH_UPDATE_NAME, progress
             )
@@ -164,7 +165,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
         )

         if not result:
-            yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)
+            yield self.db.updates._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)

         return result
@@ -208,7 +209,9 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
         if isinstance(self.database_engine, PostgresEngine):
             yield self.db.runWithConnection(create_index)

-            yield self._end_background_update(self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME)
+            yield self.db.updates._end_background_update(
+                self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME
+            )
         return 1

     @defer.inlineCallbacks
@@ -244,7 +247,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
             yield self.db.runInteraction(
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
-                self._background_update_progress_txn,
+                self.db.updates._background_update_progress_txn,
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                 pg,
             )
@@ -274,7 +277,7 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
                 "have_added_indexes": True,
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.EVENT_SEARCH_ORDER_UPDATE_NAME, progress
             )
@@ -285,7 +288,9 @@ class SearchBackgroundUpdateStore(BackgroundUpdateStore):
         )

         if not finished:
-            yield self._end_background_update(self.EVENT_SEARCH_ORDER_UPDATE_NAME)
+            yield self.db.updates._end_background_update(
+                self.EVENT_SEARCH_ORDER_UPDATE_NAME
+            )

         return num_rows

View file

@@ -27,7 +27,6 @@ from synapse.api.errors import NotFoundError
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.state import StateFilter
@@ -1023,9 +1022,7 @@ class StateGroupWorkerStore(
         return set(row["state_group"] for row in rows)

-class StateBackgroundUpdateStore(
-    StateGroupBackgroundUpdateStore, BackgroundUpdateStore
-):
+class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):

     STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
     STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
@@ -1034,21 +1031,21 @@ class StateBackgroundUpdateStore(
     def __init__(self, db_conn, hs):
         super(StateBackgroundUpdateStore, self).__init__(db_conn, hs)
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
             self._background_deduplicate_state,
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
         )
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             self.CURRENT_STATE_INDEX_UPDATE_NAME,
             index_name="current_state_events_member_index",
             table="current_state_events",
             columns=["state_key"],
             where_clause="type='m.room.member'",
         )
-        self.register_background_index_update(
+        self.db.updates.register_background_index_update(
             self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
             index_name="event_to_state_groups_sg_index",
             table="event_to_state_groups",
@@ -1181,7 +1178,7 @@ class StateBackgroundUpdateStore(
                 "max_group": max_group,
             }

-            self._background_update_progress_txn(
+            self.db.updates._background_update_progress_txn(
                 txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
             )
@@ -1192,7 +1189,7 @@ class StateBackgroundUpdateStore(
         )

         if finished:
-            yield self._end_background_update(
+            yield self.db.updates._end_background_update(
                 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
             )
@@ -1224,7 +1221,7 @@ class StateBackgroundUpdateStore(
         yield self.db.runWithConnection(reindex_txn)

-        yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
+        yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)

         return 1

View file

@@ -68,17 +68,17 @@ class StatsStore(StateDeltasStore):
         self.stats_delta_processing_lock = DeferredLock()

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_stats_process_rooms", self._populate_stats_process_rooms
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_stats_process_users", self._populate_stats_process_users
         )
         # we no longer need to perform clean-up, but we will give ourselves
         # the potential to reintroduce it in the future so documentation
         # will still encourage the use of this no-op handler.
-        self.register_noop_background_update("populate_stats_cleanup")
-        self.register_noop_background_update("populate_stats_prepare")
+        self.db.updates.register_noop_background_update("populate_stats_cleanup")
+        self.db.updates.register_noop_background_update("populate_stats_prepare")

     def quantise_stats_time(self, ts):
         """
@@ -102,7 +102,7 @@ class StatsStore(StateDeltasStore):
         This is a background update which regenerates statistics for users.
         """
         if not self.stats_enabled:
-            yield self._end_background_update("populate_stats_process_users")
+            yield self.db.updates._end_background_update("populate_stats_process_users")
             return 1

         last_user_id = progress.get("last_user_id", "")
@@ -123,7 +123,7 @@ class StatsStore(StateDeltasStore):
         # No more rooms -- complete the transaction.
         if not users_to_work_on:
-            yield self._end_background_update("populate_stats_process_users")
+            yield self.db.updates._end_background_update("populate_stats_process_users")
             return 1

         for user_id in users_to_work_on:
@@ -132,7 +132,7 @@ class StatsStore(StateDeltasStore):
         yield self.db.runInteraction(
             "populate_stats_process_users",
-            self._background_update_progress_txn,
+            self.db.updates._background_update_progress_txn,
             "populate_stats_process_users",
             progress,
         )
@@ -145,7 +145,7 @@ class StatsStore(StateDeltasStore):
         This is a background update which regenerates statistics for rooms.
         """
         if not self.stats_enabled:
-            yield self._end_background_update("populate_stats_process_rooms")
+            yield self.db.updates._end_background_update("populate_stats_process_rooms")
             return 1

         last_room_id = progress.get("last_room_id", "")
@@ -166,7 +166,7 @@ class StatsStore(StateDeltasStore):
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            yield self._end_background_update("populate_stats_process_rooms")
+            yield self.db.updates._end_background_update("populate_stats_process_rooms")
             return 1

         for room_id in rooms_to_work_on:
@@ -175,7 +175,7 @@ class StatsStore(StateDeltasStore):
         yield self.db.runInteraction(
             "_populate_stats_process_rooms",
-            self._background_update_progress_txn,
+            self.db.updates._background_update_progress_txn,
             "populate_stats_process_rooms",
             progress,
         )
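
Two patterns recur in the stats hunks. A disabled feature ends its updates immediately so they are not retried forever, and progress can be written by handing the bound progress method straight to `runInteraction` as the transaction function. As the rewritten lines read:

    # Early exit: feature disabled, so mark the update done and move on.
    if not self.stats_enabled:
        yield self.db.updates._end_background_update("populate_stats_process_users")
        return 1

    # Progress via runInteraction: the bound method is the txn function, and
    # the remaining arguments are forwarded to it after the transaction.
    yield self.db.runInteraction(
        "populate_stats_process_users",
        self.db.updates._background_update_progress_txn,
        "populate_stats_process_users",
        progress,
    )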

View file

@@ -19,7 +19,6 @@ import re
 from twisted.internet import defer

 from synapse.api.constants import EventTypes, JoinRules
-from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.data_stores.main.state import StateFilter
 from synapse.storage.data_stores.main.state_deltas import StateDeltasStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
@@ -32,7 +31,7 @@ logger = logging.getLogger(__name__)
 TEMP_TABLE = "_temp_populate_user_directory"

-class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore):
+class UserDirectoryBackgroundUpdateStore(StateDeltasStore):

     # How many records do we calculate before sending it to
     # add_users_who_share_private_rooms?
@@ -43,19 +42,19 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
         self.server_name = hs.hostname

-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_user_directory_createtables",
             self._populate_user_directory_createtables,
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_user_directory_process_rooms",
             self._populate_user_directory_process_rooms,
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_user_directory_process_users",
             self._populate_user_directory_process_users,
         )
-        self.register_background_update_handler(
+        self.db.updates.register_background_update_handler(
             "populate_user_directory_cleanup", self._populate_user_directory_cleanup
         )
@@ -108,7 +107,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
         )

         yield self.db.simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
-        yield self._end_background_update("populate_user_directory_createtables")
+        yield self.db.updates._end_background_update(
+            "populate_user_directory_createtables"
+        )
         return 1

     @defer.inlineCallbacks
@@ -130,7 +131,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
             "populate_user_directory_cleanup", _delete_staging_area
         )

-        yield self._end_background_update("populate_user_directory_cleanup")
+        yield self.db.updates._end_background_update("populate_user_directory_cleanup")
         return 1

     @defer.inlineCallbacks
@@ -176,7 +177,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            yield self._end_background_update("populate_user_directory_process_rooms")
+            yield self.db.updates._end_background_update(
+                "populate_user_directory_process_rooms"
+            )
             return 1

         logger.info(
@@ -248,7 +251,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
             progress["remaining"] -= 1
             yield self.db.runInteraction(
                 "populate_user_directory",
-                self._background_update_progress_txn,
+                self.db.updates._background_update_progress_txn,
                 "populate_user_directory_process_rooms",
                 progress,
             )
@@ -267,7 +270,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
         If search_all_users is enabled, add all of the users to the user directory.
         """
         if not self.hs.config.user_directory_search_all_users:
-            yield self._end_background_update("populate_user_directory_process_users")
+            yield self.db.updates._end_background_update(
+                "populate_user_directory_process_users"
+            )
             return 1

         def _get_next_batch(txn):
@@ -297,7 +302,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
         # No more users -- complete the transaction.
         if not users_to_work_on:
-            yield self._end_background_update("populate_user_directory_process_users")
+            yield self.db.updates._end_background_update(
+                "populate_user_directory_process_users"
+            )
             return 1

         logger.info(
@@ -317,7 +324,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore, BackgroundUpdateStore
             progress["remaining"] -= 1
             yield self.db.runInteraction(
                 "populate_user_directory",
-                self._background_update_progress_txn,
+                self.db.updates._background_update_progress_txn,
                 "populate_user_directory_process_users",
                 progress,
             )

View file

@@ -30,6 +30,7 @@ from twisted.internet import defer
 from synapse.api.errors import StoreError
 from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.background_updates import BackgroundUpdater
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.util.stringutils import exception_to_unicode
@@ -223,6 +224,8 @@ class Database(object):
         self._clock = hs.get_clock()
         self._db_pool = hs.get_db_pool()

+        self.updates = BackgroundUpdater(hs, self)
+
         self._previous_txn_total_time = 0
         self._current_txn_total_time = 0
         self._previous_loop_ts = 0
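
This is the hunk that makes every `self.db.updates` call above resolve: `Database` builds one `BackgroundUpdater` and hands it a back-reference to itself. Condensed from the lines shown (the surrounding constructor is elided):

    from synapse.storage.background_updates import BackgroundUpdater

    class Database(object):
        def __init__(self, hs):
            self._clock = hs.get_clock()
            self._db_pool = hs.get_db_pool()

            # One updater per Database; stores reach it as self.db.updates.
            self.updates = BackgroundUpdater(hs, self)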

View file

@@ -47,9 +47,9 @@ async def make_homeserver(reactor, config=None):
     stor = hs.get_datastore()

     # Run the database background updates.
-    if hasattr(stor, "do_next_background_update"):
-        while not await stor.has_completed_background_updates():
-            await stor.do_next_background_update(1)
+    if hasattr(stor.db.updates, "do_next_background_update"):
+        while not await stor.db.updates.has_completed_background_updates():
+            await stor.db.updates.do_next_background_update(1)

     def cleanup():
         for i in cleanup_tasks:
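The drive-to-completion loop above recurs throughout the test changes below. Factored into a standalone helper it would look like this (the helper name is hypothetical; the calls are exactly those in the hunk):

    async def run_all_background_updates(stor):
        # Step the updater one update at a time until it reports done.
        if hasattr(stor.db.updates, "do_next_background_update"):
            while not await stor.db.updates.has_completed_background_updates():
                await stor.db.updates.do_next_background_update(1)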
View file
@@ -42,7 +42,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         Add the background updates we need to run.
         """
         # Ugh, have to reset this flag
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         self.get_success(
             self.store.db.simple_insert(
@@ -108,8 +108,12 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         # Do the initial population of the stats via the background update
         self._add_background_updates()

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

     def test_initial_room(self):
         """
@@ -141,8 +145,12 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         # Do the initial population of the user directory via the background update
         self._add_background_updates()

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         r = self.get_success(self.get_all_room_state())
@@ -178,7 +186,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         # the position that the deltas should begin at, once they take over.
         self.hs.config.stats_enabled = True
         self.handler.stats_enabled = True
-        self.store._all_done = False
+        self.store.db.updates._all_done = False
         self.get_success(
             self.store.db.simple_update_one(
                 table="stats_incremental_position",
@@ -194,8 +202,12 @@ class StatsRoomTests(unittest.HomeserverTestCase):
             )
         )

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         # Now, before the table is actually ingested, add some more events.
         self.helper.invite(room=room_1, src=u1, targ=u2, tok=u1_token)
@@ -221,9 +233,13 @@ class StatsRoomTests(unittest.HomeserverTestCase):
             )
         )

-        self.store._all_done = False
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        self.store.db.updates._all_done = False
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         self.reactor.advance(86401)
@@ -653,7 +669,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         # preparation stage of the initial background update
         # Ugh, have to reset this flag
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         self.get_success(
             self.store.db.simple_delete(
@@ -673,7 +689,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
         # now do the background updates
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         self.get_success(
             self.store.db.simple_insert(
                 "background_updates",
@@ -705,8 +721,12 @@ class StatsRoomTests(unittest.HomeserverTestCase):
             )
         )

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         r1stats_complete = self._get_current_stats("room", r1)
         u1stats_complete = self._get_current_stats("user", u1)
View file
@@ -181,7 +181,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         Add the background updates we need to run.
         """
         # Ugh, have to reset this flag
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         self.get_success(
             self.store.db.simple_insert(
@@ -255,8 +255,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         # Do the initial population of the user directory via the background update
         self._add_background_updates()

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         shares_private = self.get_users_who_share_private_rooms()
         public_users = self.get_users_in_public_rooms()
@@ -290,8 +294,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         # Do the initial population of the user directory via the background update
         self._add_background_updates()

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         shares_private = self.get_users_who_share_private_rooms()
         public_users = self.get_users_in_public_rooms()
View file
@@ -15,7 +15,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
         self.update_handler = Mock()

-        yield self.store.register_background_update_handler(
+        yield self.store.db.updates.register_background_update_handler(
             "test_update", self.update_handler
         )
@@ -23,7 +23,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
         # (perhaps we should run them as part of the test HS setup, since we
         # run all of the other schema setup stuff there?)
         while True:
-            res = yield self.store.do_next_background_update(1000)
+            res = yield self.store.db.updates.do_next_background_update(1000)
             if res is None:
                 break
@@ -39,7 +39,7 @@ class BackgroundUpdateTestCase(unittest.TestCase):
             progress = {"my_key": progress["my_key"] + 1}
             yield self.store.db.runInteraction(
                 "update_progress",
-                self.store._background_update_progress_txn,
+                self.store.db.updates._background_update_progress_txn,
                 "test_update",
                 progress,
             )
@@ -47,29 +47,37 @@ class BackgroundUpdateTestCase(unittest.TestCase):
         self.update_handler.side_effect = update

-        yield self.store.start_background_update("test_update", {"my_key": 1})
+        yield self.store.db.updates.start_background_update(
+            "test_update", {"my_key": 1}
+        )
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(duration_ms * desired_count)
+        result = yield self.store.db.updates.do_next_background_update(
+            duration_ms * desired_count
+        )
         self.assertIsNotNone(result)
         self.update_handler.assert_called_once_with(
-            {"my_key": 1}, self.store.DEFAULT_BACKGROUND_BATCH_SIZE
+            {"my_key": 1}, self.store.db.updates.DEFAULT_BACKGROUND_BATCH_SIZE
         )

         # second step: complete the update
         @defer.inlineCallbacks
         def update(progress, count):
-            yield self.store._end_background_update("test_update")
+            yield self.store.db.updates._end_background_update("test_update")
             return count

         self.update_handler.side_effect = update
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(duration_ms * desired_count)
+        result = yield self.store.db.updates.do_next_background_update(
+            duration_ms * desired_count
+        )
         self.assertIsNotNone(result)
         self.update_handler.assert_called_once_with({"my_key": 2}, desired_count)

         # third step: we don't expect to be called any more
         self.update_handler.reset_mock()
-        result = yield self.store.do_next_background_update(duration_ms * desired_count)
+        result = yield self.store.db.updates.do_next_background_update(
+            duration_ms * desired_count
+        )
         self.assertIsNone(result)
         self.assertFalse(self.update_handler.called)
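The test above exercises the full lifecycle of an update through the new entry points. A compressed sketch of that sequence (the store, handler, and update name are illustrative, and the underscore-prefixed call is private API that the test itself also uses):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def drive_update_to_completion(store):
        # Handler that immediately marks the (hypothetical) update done.
        @defer.inlineCallbacks
        def handler(progress, batch_size):
            yield store.db.updates._end_background_update("example_update")
            return batch_size

        yield store.db.updates.register_background_update_handler(
            "example_update", handler
        )
        yield store.db.updates.start_background_update(
            "example_update", {"my_key": 1}
        )
        # do_next_background_update returns None once nothing remains.
        result = yield store.db.updates.do_next_background_update(100)
        return result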
View file
@@ -46,7 +46,9 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         """Re run the background update to clean up the extremities.
         """
         # Make sure we don't clash with in progress updates.
-        self.assertTrue(self.store._all_done, "Background updates are still ongoing")
+        self.assertTrue(
+            self.store.db.updates._all_done, "Background updates are still ongoing"
+        )

         schema_path = os.path.join(
             prepare_database.dir_path,
@@ -68,10 +70,14 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
         )

         # Ugh, have to reset this flag
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

     def test_soft_failed_extremities_handled_correctly(self):
         """Test that extremities are correctly calculated in the presence of
View file
@@ -202,8 +202,12 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
     def test_devices_last_seen_bg_update(self):
         # First make sure we have completed all updates.
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         # Insert a user IP
         user_id = "@user:id"
@@ -256,11 +260,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         )

         # ... and tell the DataStore that it hasn't finished all updates yet
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         # Now let's actually drive the updates to completion
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         # We should now get the correct result again
         result = self.get_success(
@@ -281,8 +289,12 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
     def test_old_user_ips_pruned(self):
         # First make sure we have completed all updates.
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         # Insert a user IP
         user_id = "@user:id"
View file
@@ -122,8 +122,12 @@ class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
     def test_can_rerun_update(self):
         # First make sure we have completed all updates.
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )

         # Now let's create a room, which will insert a membership
         user = UserID("alice", "test")
@@ -143,8 +147,12 @@ class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
         )

         # ... and tell the DataStore that it hasn't finished all updates yet
-        self.store._all_done = False
+        self.store.db.updates._all_done = False

         # Now let's actually drive the updates to completion
-        while not self.get_success(self.store.has_completed_background_updates()):
-            self.get_success(self.store.do_next_background_update(100), by=0.1)
+        while not self.get_success(
+            self.store.db.updates.has_completed_background_updates()
+        ):
+            self.get_success(
+                self.store.db.updates.do_next_background_update(100), by=0.1
+            )
View file
@@ -401,10 +401,12 @@ class HomeserverTestCase(TestCase):
         hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
         stor = hs.get_datastore()

-        # Run the database background updates.
-        if hasattr(stor, "do_next_background_update"):
-            while not self.get_success(stor.has_completed_background_updates()):
-                self.get_success(stor.do_next_background_update(1))
+        # Run the database background updates, when running against "master".
+        if hs.__class__.__name__ == "TestHomeServer":
+            while not self.get_success(
+                stor.db.updates.has_completed_background_updates()
+            ):
+                self.get_success(stor.db.updates.do_next_background_update(1))

         return hs
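Note the guard change in this last hunk: every store's db now exposes an updates attribute, so the old hasattr duck-typing no longer distinguishes the main datastore from worker stores. The harness therefore only drives background updates when the homeserver under test is the master ("TestHomeServer").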