Start adding some tests
commit 192e228a98 (parent d516d68b29)

2 changed files with 119 additions and 10 deletions
synapse/appservice/scheduler.py

@@ -49,6 +49,8 @@ This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
+from twisted.internet import defer
+


 class AppServiceScheduler(object):
     """ Public facing API for this module. Does the required DI to tie the
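The only change in this first hunk is pulling in twisted.internet.defer, which the later hunks lean on: @defer.inlineCallbacks lets a method be written with yield against Deferreds, and defer.returnValue hands the result back to the caller. A minimal, self-contained sketch of that pattern (not part of the commit; FakeStore and fetch_oldest are made-up names for illustration):

    from twisted.internet import defer


    class FakeStore(object):
        """Stand-in for a storage layer whose methods return Deferreds."""
        def get_oldest_txn(self, service):
            # A real store would fire the Deferred asynchronously;
            # defer.succeed() returns an already-fired one for illustration.
            return defer.succeed("txn-for-%s" % (service,))


    @defer.inlineCallbacks
    def fetch_oldest(store, service):
        txn = yield store.get_oldest_txn(service)  # waits for the Deferred
        defer.returnValue(txn)                     # result seen by the caller


    results = []
    fetch_oldest(FakeStore(), "irc_bridge").addCallback(results.append)
    # The Deferred fired synchronously here, so results == ["txn-for-irc_bridge"]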
@@ -105,11 +107,14 @@ class _EventGrouper(object):
         self.groups = {}  # dict of {service: [events]}

     def on_receive(self, service, event):
-        # TODO group this
-        pass
+        if service not in self.groups:
+            self.groups[service] = []
+        self.groups[service].append(event)

     def drain_groups(self):
-        return self.groups
+        groups = self.groups
+        self.groups = {}
+        return groups


 class _TransactionController(object):
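With this hunk _EventGrouper actually does its job: on_receive buckets each event under its service, and drain_groups hands back everything accumulated so far while resetting the internal dict, so the next drain only sees new events. A rough usage sketch (plain strings and dicts as stand-in services and events; the tests below use Mock() objects):

    from synapse.appservice.scheduler import _EventGrouper

    grouper = _EventGrouper()
    grouper.on_receive("service_a", {"type": "m.room.message", "body": "hi"})
    grouper.on_receive("service_a", {"type": "m.room.member"})
    grouper.on_receive("service_b", {"type": "m.room.message", "body": "yo"})

    groups = grouper.drain_groups()
    # groups maps each service to its events, in arrival order:
    # {"service_a": [<2 events>], "service_b": [<1 event>]}

    # Draining resets the internal state, so a second drain is empty.
    assert grouper.drain_groups() == {}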
@@ -135,7 +140,6 @@ class _TransactionController(object):
         self._start_recoverer(service)
-        self.clock.call_later(1000, self.start_polling)

     def on_recovered(self, service):
         # TODO mark AS as UP
         pass
@@ -172,26 +176,25 @@ class _Recoverer(object):
     def recover(self):
         self.clock.call_later(2000 ** self.backoff_counter, self.retry)

+    @defer.inlineCallbacks
     def retry(self):
-        txn = self._get_oldest_txn()
+        txn = yield self._get_oldest_txn()
         if txn:
             if txn.send(self.as_api):
                 txn.complete(self.store)
                 # reset the backoff counter and retry immediately
                 self.backoff_counter = 1
                 self.retry()
                 return
             else:
                 self.backoff_counter += 1
                 self.recover()
                 return
         else:
             self._set_service_recovered()

     def _set_service_recovered(self):
         self.callback(self.service)

+    @defer.inlineCallbacks
     def _get_oldest_txn(self):
-        pass  # returns AppServiceTransaction
+        txn = yield self.store.get_oldest_txn(self.service)
+        defer.returnValue(txn)
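retry() now genuinely waits on the store (the yield), and each failed send increments backoff_counter before rescheduling via recover(), so the next call_later delay is 2000 ** backoff_counter. A quick illustration of how steep that schedule is (delay units are whatever the clock uses; the tests advance a MockClock by 2000):

    # Delay before each successive retry attempt, per 2000 ** backoff_counter.
    delays = [2000 ** counter for counter in range(1, 4)]
    # delays == [2000, 4000000, 8000000000] -- the wait between attempts
    # explodes after only a couple of failures.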

tests/appservice/test_scheduler.py (new file, 106 lines added)

@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.appservice.scheduler import (
    AppServiceScheduler, AppServiceTransaction, _EventGrouper,
    _TransactionController, _Recoverer
)
from twisted.internet import defer
from ..utils import MockClock

from mock import Mock
from tests import unittest


class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):

    def setUp(self):
        self.clock = MockClock()
        self.as_api = Mock()
        self.store = Mock()
        self.service = Mock()
        self.callback = Mock()
        self.recoverer = _Recoverer(
            clock=self.clock,
            as_api=self.as_api,
            store=self.store,
            service=self.service,
            callback=self.callback,
        )

    def test_recover_service_single_txn(self):
        txns = self._mk_txns(1)
        self.store.get_oldest_txn = Mock(return_value=defer.succeed(txns[0]))

        self.recoverer.recover()
        self.assertEquals(0, self.store.get_oldest_txn.call_count)
        self.clock.advance_time(2000)
        self.assertEquals(2, self.store.get_oldest_txn.call_count)

    def _mk_txns(self, num_txns):
        return [
            Mock() for i in range(num_txns)
        ]


class ApplicationServiceSchedulerEventGrouperTestCase(unittest.TestCase):

    def setUp(self):
        self.grouper = _EventGrouper()

    def test_drain_single_event(self):
        service = Mock()
        event = Mock()
        self.grouper.on_receive(service, event)
        groups = self.grouper.drain_groups()
        self.assertTrue(service in groups)
        self.assertEquals([event], groups[service])
        self.assertEquals(1, len(groups.keys()))
        # no more events
        self.assertEquals(self.grouper.drain_groups(), {})

    def test_drain_multiple_events(self):
        service = Mock()
        events = [Mock(), Mock(), Mock()]
        for e in events:
            self.grouper.on_receive(service, e)
        groups = self.grouper.drain_groups()
        self.assertTrue(service in groups)
        self.assertEquals(events, groups[service])
        # no more events
        self.assertEquals(self.grouper.drain_groups(), {})

    def test_drain_multiple_services(self):
        services = [Mock(), Mock(), Mock()]
        events_a = [Mock(), Mock()]
        events_b = [Mock()]
        events_c = [Mock(), Mock(), Mock(), Mock()]
        mappings = {
            services[0]: events_a,
            services[1]: events_b,
            services[2]: events_c
        }
        for e in events_b:
            self.grouper.on_receive(services[1], e)
        for e in events_c:
            self.grouper.on_receive(services[2], e)
        for e in events_a:
            self.grouper.on_receive(services[0], e)

        groups = self.grouper.drain_groups()
        for service in services:
            self.assertTrue(service in groups)
            self.assertEquals(mappings[service], groups[service])
        self.assertEquals(3, len(groups.keys()))
        # no more events
        self.assertEquals(self.grouper.drain_groups(), {})
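The Recoverer test drives the new code with MockClock from tests/utils: advance_time() moves a fake time forward and runs any call_later callbacks whose deadline has passed, which is how the 2000-unit backoff is crossed instantly and deterministically. A toy model of that mechanism (an assumption about its shape for illustration, not the real MockClock implementation):

    class ToyClock(object):
        """Simplified stand-in for the MockClock used by the tests."""

        def __init__(self):
            self.now = 0
            self._pending = []  # list of (deadline, callback)

        def call_later(self, delay, callback):
            self._pending.append((self.now + delay, callback))

        def advance_time(self, secs):
            self.now += secs
            due = [cb for deadline, cb in self._pending if deadline <= self.now]
            self._pending = [
                (deadline, cb) for deadline, cb in self._pending
                if deadline > self.now
            ]
            for cb in due:
                cb()


    fired = []
    clock = ToyClock()
    clock.call_later(2000, lambda: fired.append("retry"))
    clock.advance_time(1999)   # not yet due, nothing fires
    clock.advance_time(1)      # deadline reached, callback runs
    assert fired == ["retry"]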