import asyncio
import glob
import logging
import os
import time
from pathlib import Path

import boto3
import pytest
from redis import asyncio as aioredis

from . import dfly_args
from .utility import DflySeeder, wait_available_async

BASIC_ARGS = {"dir": "{DRAGONFLY_TMP}/"}
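
# Shared seeder profile: 12k keys across 5 logical databases, without MULTI/EXEC transactions.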
SEEDER_ARGS = dict(keys=12_000, dbcount=5, multi_transaction_probability=0)
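

# Common helpers shared by the snapshot test classes below.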
class SnapshotTestBase:
    def setup(self, tmp_dir: Path):
        self.tmp_dir = tmp_dir

    def get_main_file(self, pattern):
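        # dfs snapshots are written as one file per shard plus a summary file; only the
        # summary counts as the main file here. For rdb patterns any single match qualifies.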
        def is_main(f):
            return "summary" in f if pattern.endswith("dfs") else True

        files = glob.glob(str(self.tmp_dir.absolute()) + "/" + pattern)
        possible_mains = list(filter(is_main, files))
        assert len(possible_mains) == 1, possible_mains
        return possible_mains[0]

    async def wait_for_save(self, pattern):
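        # Poll once per second until a file matching the pattern appears.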
        while True:
            files = glob.glob(str(self.tmp_dir.absolute()) + "/" + pattern)
            if files:
                break
            await asyncio.sleep(1)


@dfly_args({**BASIC_ARGS, "dbfilename": "test-rdb-{{timestamp}}"})
class TestRdbSnapshot(SnapshotTestBase):
    """Test single file rdb snapshot"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, async_client, df_server):
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()

        # save + flush + load
        await async_client.execute_command("SAVE RDB")
        assert await async_client.flushall()
        await async_client.execute_command("DEBUG LOAD " + super().get_main_file("test-rdb-*.rdb"))

        assert await seeder.compare(start_capture, port=df_server.port)
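

# Passing nodf_snapshot_format makes the server save in the legacy rdb format
# instead of Dragonfly's multi-file dfs format.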
@dfly_args({**BASIC_ARGS, "dbfilename": "test-rdbexact.rdb", "nodf_snapshot_format": None})
class TestRdbSnapshotExactFilename(SnapshotTestBase):
    """Test single file rdb snapshot without a timestamp"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, async_client, df_server):
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()

        # save + flush + load
        await async_client.execute_command("SAVE RDB")
        assert await async_client.flushall()
        main_file = super().get_main_file("test-rdbexact.rdb")
        await async_client.execute_command("DEBUG LOAD " + main_file)

        assert await seeder.compare(start_capture, port=df_server.port)


@dfly_args({**BASIC_ARGS, "dbfilename": "test-dfs"})
class TestDflySnapshot(SnapshotTestBase):
    """Test multi file snapshot"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, async_client, df_server):
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()

        # save + flush + load
        await async_client.execute_command("SAVE DF")
        assert await async_client.flushall()
        await async_client.execute_command(
            "DEBUG LOAD " + super().get_main_file("test-dfs-summary.dfs")
        )

        assert await seeder.compare(start_capture, port=df_server.port)


# We spawn instances manually, so reduce the default memory usage to a minimum.
@dfly_args({"proactor_threads": "1"})
class TestDflyAutoLoadSnapshot(SnapshotTestBase):
    """Test automatic loading of dump files on startup with timestamp"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)
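
    # (save format, dbfilename) pairs: names with and without the {{timestamp}}
    # placeholder, and with and without an explicit .rdb extension.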
    cases = [
        ("rdb", "test-autoload1-{{timestamp}}"),
        ("df", "test-autoload2-{{timestamp}}"),
        ("rdb", "test-autoload3-{{timestamp}}.rdb"),
        ("rdb", "test-autoload4"),
        ("df", "test-autoload5"),
        ("rdb", "test-autoload6.rdb"),
    ]

    @pytest.mark.asyncio
    @pytest.mark.parametrize("save_type, dbfilename", cases)
    async def test_snapshot(self, df_local_factory, save_type, dbfilename):
        df_args = {"dbfilename": dbfilename, **BASIC_ARGS, "port": 1111}
        if save_type == "rdb":
            df_args["nodf_snapshot_format"] = None
        with df_local_factory.create(**df_args) as df_server:
            async with df_server.client() as client:
                await wait_available_async(client)
                await client.set("TEST", hash(dbfilename))
                await client.execute_command("SAVE " + save_type)
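
        # Restarting with the same arguments should auto-load the dump written above.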
        with df_local_factory.create(**df_args) as df_server:
            async with df_server.client() as client:
                await wait_available_async(client)
                response = await client.get("TEST")
                assert response == str(hash(dbfilename))


# save every 1 minute
@dfly_args({**BASIC_ARGS, "dbfilename": "test-cron", "snapshot_cron": "* * * * *"})
class TestCronPeriodicSnapshot(SnapshotTestBase):
    """Test periodic snapshotting"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, df_server):
        seeder = df_seeder_factory.create(
            port=df_server.port, keys=10, multi_transaction_probability=0
        )
        await seeder.run(target_deviation=0.5)

        await super().wait_for_save("test-cron-summary.dfs")

        assert super().get_main_file("test-cron-summary.dfs")


@dfly_args({**BASIC_ARGS, "dbfilename": "test-set-snapshot_cron"})
class TestSetSnapshotCron(SnapshotTestBase):
    """Test setting the snapshot_cron flag at runtime"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, async_client, df_server):
        seeder = df_seeder_factory.create(
            port=df_server.port, keys=10, multi_transaction_probability=0
        )
        await seeder.run(target_deviation=0.5)
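
        # Enable cron snapshots at runtime; a summary file should then appear within a minute.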
        await async_client.execute_command("CONFIG", "SET", "snapshot_cron", "* * * * *")

        await super().wait_for_save("test-set-snapshot_cron-summary.dfs")

        assert super().get_main_file("test-set-snapshot_cron-summary.dfs")


@dfly_args(
    {**BASIC_ARGS, "dbfilename": "test-save-rename-command", "rename_command": "save=save-foo"}
)
class TestSnapshotShutdownWithRenameSave(SnapshotTestBase):
    """Test that shutdown still saves a snapshot when the SAVE command is renamed"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    async def test_snapshot(self, df_server, df_seeder_factory):
        """Checks that we save a snapshot on shutdown"""
        seeder = df_seeder_factory.create(port=df_server.port)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()
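
        # Even with SAVE renamed, the shutdown path must still write a snapshot.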
        df_server.stop()
        df_server.start()

        a_client = aioredis.Redis(port=df_server.port)
        await wait_available_async(a_client)
        await a_client.connection_pool.disconnect()

        assert await seeder.compare(start_capture, port=df_server.port)


@dfly_args({**BASIC_ARGS})
class TestOnlyOneSaveAtATime(SnapshotTestBase):
    """Dragonfly does not allow simultaneous save operations: issue two SAVEs and make sure one is rejected"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, async_client, df_server):
        await async_client.execute_command(
            "debug", "populate", "1000000", "askldjh", "1000", "RAND"
        )
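
        # Race two SAVE commands; the server is expected to reject the second
        # while the first is still running.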
        async def save():
            try:
                await async_client.execute_command("save", "rdb", "dump")
                return True
            except Exception:
                return False

        save_commands = [asyncio.create_task(save()) for _ in range(2)]

        num_successes = 0
        for result in asyncio.as_completed(save_commands):
            num_successes += await result

        assert num_successes == 1, "Only one SAVE must be successful"


@dfly_args({**BASIC_ARGS})
class TestPathEscapes(SnapshotTestBase):
    """Test that we don't allow path escapes. We just check that df_server.start()
    fails, because we don't have a much better way to test that."""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    @pytest.mark.asyncio
    async def test_snapshot(self, df_local_factory):
        df_server = df_local_factory.create(dbfilename="../../../../etc/passwd")
        try:
            df_server.start()
            assert False, "Server should not start correctly"
        except Exception:
            pass


@dfly_args({**BASIC_ARGS, "dbfilename": "test-shutdown"})
class TestDflySnapshotOnShutdown(SnapshotTestBase):
    """Test snapshot on shutdown"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    async def _get_info_memory_fields(self, client):
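        # Parse the INFO MEMORY reply, keeping only object_used_memory and the
        # per-type type_used_memory_* counters.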
        res = await client.execute_command("INFO MEMORY")
        fields = {}
        for line in res.decode("ascii").splitlines():
            if line.startswith("#"):
                continue
            k, v = line.split(":")
            if k == "object_used_memory" or k.startswith("type_used_memory_"):
                fields[k] = int(v)
        return fields

    async def _delete_all_keys(self, client):
        # Delete all keys from all DBs
        for i in range(0, SEEDER_ARGS["dbcount"]):
            await client.select(i)
            while True:
                keys = await client.keys("*")
                if len(keys) == 0:
                    break
                await client.delete(*keys)

    @pytest.mark.asyncio
    async def test_memory_counters(self, df_seeder_factory, df_server):
        a_client = aioredis.Redis(port=df_server.port)

        memory_counters = await self._get_info_memory_fields(a_client)
        assert memory_counters == {"object_used_memory": 0}

        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        memory_counters = await self._get_info_memory_fields(a_client)
        assert all(value > 0 for value in memory_counters.values())

        await self._delete_all_keys(a_client)
        memory_counters = await self._get_info_memory_fields(a_client)
        assert memory_counters == {"object_used_memory": 0}

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, df_server):
        """Checks that:
        1. After reloading the snapshot file the data is the same.
        2. Memory counters after loading from a snapshot are similar to those from
           before the snapshot was created.
        3. Memory counters drop back to zero after deleting all keys loaded from the
           snapshot; this validates memory accounting when loading from a snapshot."""
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()
        a_client = aioredis.Redis(port=df_server.port)
        memory_before = await self._get_info_memory_fields(a_client)

        df_server.stop()
        df_server.start()

        a_client = aioredis.Redis(port=df_server.port)
        await wait_available_async(a_client)
        await a_client.connection_pool.disconnect()

        assert await seeder.compare(start_capture, port=df_server.port)
        memory_after = await self._get_info_memory_fields(a_client)
        for counter, value in memory_before.items():
            # Unfortunately memory usage sometimes depends on the order of insertion
            # and deletion, so it's usually not exactly the same. For the test to be
            # stable we check that it's at least 50% of the original value.
            assert memory_after[counter] >= 0.5 * value

        await self._delete_all_keys(a_client)
        memory_empty = await self._get_info_memory_fields(a_client)
        assert memory_empty == {"object_used_memory": 0}


@dfly_args({**BASIC_ARGS, "dbfilename": "test-info-persistence"})
class TestDflyInfoPersistenceLoadingField(SnapshotTestBase):
    """Test the is_loading field of INFO PERSISTENCE during snapshot loading"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        super().setup(tmp_dir)

    def extract_is_loading_field(self, res):
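        # INFO replies arrive as raw bytes; return the single character that
        # follows "loading:".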
        matcher = b"loading:"
        start = res.find(matcher)
        pos = start + len(matcher)
        return chr(res[pos])

    @pytest.mark.asyncio
    async def test_snapshot(self, df_seeder_factory, df_server):
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.05)
        a_client = aioredis.Redis(port=df_server.port)

        # Wait for the snapshot to finish loading, then try INFO PERSISTENCE.
        await wait_available_async(a_client)
        res = await a_client.execute_command("INFO PERSISTENCE")
        assert "0" == self.extract_is_loading_field(res)

        await a_client.connection_pool.disconnect()


# If DRAGONFLY_S3_BUCKET is configured, AWS credentials must also be configured.
@pytest.mark.skipif(
    "DRAGONFLY_S3_BUCKET" not in os.environ, reason="AWS S3 snapshots bucket is not configured"
)
@dfly_args({"dir": "s3://{DRAGONFLY_S3_BUCKET}{DRAGONFLY_TMP}", "dbfilename": ""})
class TestS3Snapshot:
    """Test a snapshot using S3 storage"""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_dir: Path):
        self.tmp_dir = tmp_dir

    @pytest.mark.asyncio
    @pytest.mark.slow
    async def test_snapshot(self, df_seeder_factory, async_client, df_server):
        seeder = df_seeder_factory.create(port=df_server.port, **SEEDER_ARGS)
        await seeder.run(target_deviation=0.1)

        start_capture = await seeder.capture()

        try:
            # save + flush + load
            await async_client.execute_command("SAVE DF snapshot")
            assert await async_client.flushall()
            await async_client.execute_command(
                "DEBUG LOAD "
                + os.environ["DRAGONFLY_S3_BUCKET"]
                + str(self.tmp_dir)
                + "/snapshot-summary.dfs"
            )

            assert await seeder.compare(start_capture, port=df_server.port)
        finally:
            self._delete_objects(
                os.environ["DRAGONFLY_S3_BUCKET"],
                str(self.tmp_dir)[1:],
            )

    def _delete_objects(self, bucket, prefix):
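        # Best-effort cleanup: delete everything the test wrote under the given prefix.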
        client = boto3.client("s3")
        resp = client.list_objects_v2(
            Bucket=bucket,
            Prefix=prefix,
        )
        # "Contents" is absent when no objects match, and DeleteObjects rejects
        # an empty key list, so guard both cases.
        keys = [{"Key": obj["Key"]} for obj in resp.get("Contents", [])]
        if keys:
            client.delete_objects(
                Bucket=bucket,
                Delete={"Objects": keys},
            )