Mirror of https://github.com/dragonflydb/dragonfly.git (synced 2024-12-14 11:58:02 +00:00)
refactor: set info_replication_valkey_compatible=true (#3467)
* refactor: set info_replication_valkey_compatible=true
* test: mark test_cluster_replication_migration as skipped because it's broken
Parent: 5258bbacd1
Commit: 48a28c3ea3
5 changed files with 18 additions and 16 deletions
```diff
@@ -123,7 +123,7 @@ ABSL_FLAG(bool, s3_ec2_metadata, false,
 ABSL_FLAG(bool, s3_sign_payload, true,
           "whether to sign the s3 request payload when uploading snapshots");
 
-ABSL_FLAG(bool, info_replication_valkey_compatible, false,
+ABSL_FLAG(bool, info_replication_valkey_compatible, true,
           "when true - output valkey compatible values for info-replication");
 
 ABSL_DECLARE_FLAG(int32_t, port);
```
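For context: this one-word change flips the default, so fresh builds now emit Valkey/Redis-compatible replication vocabulary out of the box. The test updates below spell out what that means: ROLE reports "slave" rather than "replica", the steady replication state reads "online" rather than "stable_sync", and INFO replication exposes per-replica "slave0" entries. A minimal sketch checking the new default with redis-py (host/port and client setup are illustrative, not part of this commit):

```python
# Sketch: confirm valkey-compatible replication output on a running replica.
# Assumes a Dragonfly replica listens on localhost:6380 (illustrative).
import redis

replica = redis.Redis(port=6380, decode_responses=True)

info = replica.info("replication")
# With info_replication_valkey_compatible=true (the new default),
# the role field matches Valkey/Redis naming.
assert info["role"] == "slave"

# ROLE on a replica returns [role, master_host, master_port, state, ...];
# the steady state is now reported as "online".
assert replica.execute_command("role")[0] == "slave"
```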
```diff
@@ -1523,6 +1523,7 @@ async def test_cluster_config_reapply(df_factory: DflyInstanceFactory):
     await close_clients(*[node.client for node in nodes], *[node.admin_client for node in nodes])
 
 
+@pytest.mark.skip("broken")
 @dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
 async def test_cluster_replication_migration(
     df_factory: DflyInstanceFactory, df_seeder_factory: DflySeederFactory
```
```diff
@@ -1553,7 +1554,7 @@ async def test_cluster_replication_migration(
 
     # generate some data with seederv1
     seeder = df_seeder_factory.create(keys=2000, port=m1.port, cluster_mode=True)
-    seeder.run(target_deviation=0.1)
+    await seeder.run(target_deviation=0.1)
 
     # start replication from replicas
     await r1_node.admin_client.execute_command(f"replicaof localhost {m1_node.instance.port}")
```
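Note that the change in this hunk is a genuine bug fix, independent of the naming work: the seeder's run method is a coroutine, and calling it without await only creates a coroutine object, so the seeding step silently never executed. A generic illustration of the pitfall (the fill coroutine is hypothetical):

```python
import asyncio

async def fill(n):
    # Stand-in for something like seeder.run(); pretend it writes n keys.
    await asyncio.sleep(0)
    return n

async def main():
    fill(10)        # Bug: coroutine is created but never scheduled;
                    # CPython warns "coroutine 'fill' was never awaited".
    await fill(10)  # Fix: the coroutine actually runs to completion.

asyncio.run(main())
```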
```diff
@@ -1750,7 +1751,7 @@ async def await_stable_sync(m_client: aioredis.Redis, replica_port, timeout=10):
         role = await m_client.execute_command("role")
         return role == [
             "master",
-            [["127.0.0.1", str(replica_port), "stable_sync"]],
+            [["127.0.0.1", str(replica_port), "online"]],
         ]
 
     while (time.time() - start) < timeout:
```
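This helper polls the master's ROLE reply until the given replica shows up, so only the expected state string changes. A usage sketch, to be run inside an async test (client and port are illustrative):

```python
# Block until the master lists the replica as "online", or until the
# helper's default 10-second timeout elapses.
await await_stable_sync(c_master, replica_port=6380)
```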
```diff
@@ -27,12 +27,12 @@ M_SLOW = [pytest.mark.slow]
 M_STRESS = [pytest.mark.slow, pytest.mark.opt_only]
 
 
-async def wait_for_replicas_state(*clients, state="stable_sync", timeout=0.05):
+async def wait_for_replicas_state(*clients, state="online", timeout=0.05):
     """Wait until all clients (replicas) reach passed state"""
     while len(clients) > 0:
         await asyncio.sleep(timeout)
         roles = await asyncio.gather(*(c.role() for c in clients))
-        clients = [c for c, role in zip(clients, roles) if role[0] != "replica" or role[3] != state]
+        clients = [c for c, role in zip(clients, roles) if role[0] != "slave" or role[3] != state]
 
 
 """
```
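Call sites need no change: the helper keeps polling ROLE until every client reports role "slave" with the requested state, dropping clients from the list as they get there. A usage sketch inside an async test (client names and the explicit state are illustrative):

```python
# Wait until both replicas reach the default steady state, now "online".
await wait_for_replicas_state(c_replica1, c_replica2)

# Or wait for an explicit intermediate state instead of the default.
await wait_for_replicas_state(c_replica1, state="full_sync")
```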
```diff
@@ -135,7 +135,7 @@ async def test_replication_all(
 
 async def check_replica_finished_exec(c_replica: aioredis.Redis, m_offset):
     role = await c_replica.role()
-    if role[0] != "replica" or role[3] != "stable_sync":
+    if role[0] != "slave" or role[3] != "online":
         return False
     syncid, r_offset = await c_replica.execute_command("DEBUG REPLICA OFFSET")
```
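The diff context cuts off mid-function; presumably the helper then compares the replica's reported offset against the master's. A hedged sketch of the whole check (the final comparison is an assumption from the function's name, not shown in the diff):

```python
async def check_replica_finished_exec(c_replica, m_offset):
    role = await c_replica.role()
    # Only an "online" replica can be considered caught up.
    if role[0] != "slave" or role[3] != "online":
        return False
    syncid, r_offset = await c_replica.execute_command("DEBUG REPLICA OFFSET")
    # Assumed continuation: caught up once offsets match.
    return r_offset == m_offset
```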
```diff
@@ -980,13 +980,13 @@ async def test_role_command(df_factory, n_keys=20):
 
     assert await c_master.execute_command("role") == [
         "master",
-        [["127.0.0.1", str(replica.port), "stable_sync"]],
+        [["127.0.0.1", str(replica.port), "online"]],
     ]
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
-        "stable_sync",
+        "online",
     ]
 
     # This tests that we react fast to socket shutdowns and don't hang on
```
```diff
@@ -994,7 +994,7 @@ async def test_role_command(df_factory, n_keys=20):
     master.stop()
     await asyncio.sleep(0.1)
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
         "connecting",
```
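Across these role-command tests the renaming is mechanical (transient states such as "connecting" are untouched). A small reference mapping, with values taken straight from the diff:

```python
# Old Dragonfly value -> Valkey-compatible value (the new default).
VALKEY_COMPAT = {
    "replica": "slave",       # ROLE reply / INFO "role" field
    "stable_sync": "online",  # steady-state replica link state
}
```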
```diff
@@ -1344,13 +1344,13 @@ async def test_take_over_timeout(df_factory, df_seeder_factory):
 
     assert await c_master.execute_command("role") == [
         "master",
-        [["127.0.0.1", str(replica.port), "stable_sync"]],
+        [["127.0.0.1", str(replica.port), "online"]],
     ]
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
-        "stable_sync",
+        "online",
     ]
 
     await disconnect_clients(c_master, c_replica)
```
```diff
@@ -1558,7 +1558,7 @@ async def test_replicaof_flag_replication_waits(df_factory):
 
     # check that it is in replica mode, yet status is down
     info = await c_replica.info("replication")
-    assert info["role"] == "replica"
+    assert info["role"] == "slave"
     assert info["master_host"] == "localhost"
     assert info["master_port"] == BASE_PORT
     assert info["master_link_status"] == "down"
```
```diff
@@ -2008,6 +2008,7 @@ async def test_policy_based_eviction_propagation(df_factory, df_seeder_factory):
     keys_replica = await c_replica.execute_command("keys k*")
 
     assert set(keys_replica).difference(keys_master) == set()
+    assert set(keys_master).difference(keys_replica) == set()
 
     await disconnect_clients(c_master, *[c_replica])
```
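The added assertion makes the containment check symmetric, so the pair now verifies that master and replica hold exactly the same key set. The two lines are equivalent to a single set equality:

```python
assert set(keys_master) == set(keys_replica)
```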
```diff
@@ -18,7 +18,7 @@ def main():
     info = client.info("replication")
     assert info["role"] == "master"
     replication_state = info["slave0"]
-    assert replication_state["state"] == "stable_sync"
+    assert replication_state["state"] == "online"
 
 def is_zero_lag(replication_state):
     return replication_state["lag"] == 0
```
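redis-py parses each INFO "slaveN" line into a dict, and this script's assertion now expects the state field to read "online". A sketch of the parsed shape (all values illustrative):

```python
# Roughly what client.info("replication") yields on a master with one replica.
example = {
    "role": "master",
    "slave0": {"ip": "127.0.0.1", "port": 6380, "state": "online", "lag": 0},
}
assert example["slave0"]["state"] == "online"
```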
```diff
@@ -234,7 +234,7 @@ def attach(args):
     if args.attach_as_replica:
         newcomer = Node(args.attach_host, args.attach_port)
         replica_resp = send_command(newcomer, ["info", "replication"])
-        if replica_resp["role"] != "replica":
+        if replica_resp["role"] != "slave":
             die_with_err("Node is not in replica mode")
         if (
             replica_resp["master_host"] != args.target_host
```