add force reset test

antoniofilipovic 2024-03-22 13:20:57 +01:00
parent 229e66b8bc
commit 3cc2dfafc4


@@ -1420,5 +1420,135 @@ def test_multiple_old_mains_single_failover():
        time_slept += 0.1


def test_force_reset_works_after_failed_registration():
    # Goal of this test is to check that the cluster recovers through force reset
    # after an attempt to register a non-existent instance fails.
    # 1. Start all instances.
    # 2. Check everything works correctly.
    # 3. Try to register an instance which doesn't exist.
    # 4. Enter force reset.
    # 5. Check that everything works correctly.
    # 1
    safe_execute(shutil.rmtree, TEMP_DIR)
    inner_instances_description = get_instances_description_no_setup()

    interactive_mg_runner.start_all(inner_instances_description)

    setup_queries = [
        "ADD COORDINATOR 1 WITH CONFIG {'bolt_server': '127.0.0.1:7690', 'coordinator_server': '127.0.0.1:10111'}",
        "ADD COORDINATOR 2 WITH CONFIG {'bolt_server': '127.0.0.1:7691', 'coordinator_server': '127.0.0.1:10112'}",
        "REGISTER INSTANCE instance_1 WITH CONFIG {'bolt_server': '127.0.0.1:7687', 'management_server': '127.0.0.1:10011', 'replication_server': '127.0.0.1:10001'};",
        "REGISTER INSTANCE instance_2 WITH CONFIG {'bolt_server': '127.0.0.1:7688', 'management_server': '127.0.0.1:10012', 'replication_server': '127.0.0.1:10002'};",
        "REGISTER INSTANCE instance_3 WITH CONFIG {'bolt_server': '127.0.0.1:7689', 'management_server': '127.0.0.1:10013', 'replication_server': '127.0.0.1:10003'};",
        "SET INSTANCE instance_3 TO MAIN",
    ]
    coord_cursor_3 = connect(host="localhost", port=7692).cursor()
    for query in setup_queries:
        execute_and_fetch_all(coord_cursor_3, query)
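    # Note: all setup runs through coordinator 3 (bolt port 7692); it is assumed to
    # be the Raft leader here, which is why it accepts the ADD COORDINATOR /
    # REGISTER INSTANCE / SET INSTANCE writes.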
    # 2
    def show_instances_coord3():
        return sorted(list(execute_and_fetch_all(coord_cursor_3, "SHOW INSTANCES;")))

    coord_cursor_1 = connect(host="localhost", port=7690).cursor()

    def show_instances_coord1():
        return sorted(list(execute_and_fetch_all(coord_cursor_1, "SHOW INSTANCES;")))

    coord_cursor_2 = connect(host="localhost", port=7691).cursor()

    def show_instances_coord2():
        return sorted(list(execute_and_fetch_all(coord_cursor_2, "SHOW INSTANCES;")))
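    # Each closure re-runs SHOW INSTANCES on one coordinator, so the polling
    # assertions below can retry until that coordinator's view converges.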
    leader_data = [
        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
        ("instance_1", "", "127.0.0.1:10011", "up", "replica"),
        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
        ("instance_3", "", "127.0.0.1:10013", "up", "main"),
    ]
    follower_data = [
        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
        ("instance_1", "", "", "unknown", "replica"),
        ("instance_2", "", "", "unknown", "replica"),
        ("instance_3", "", "", "unknown", "main"),
    ]

    mg_sleep_and_assert(leader_data, show_instances_coord3)
    mg_sleep_and_assert(follower_data, show_instances_coord1)
    mg_sleep_and_assert(follower_data, show_instances_coord2)
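    # The follower rows above report empty sockets and "unknown" health; judging by
    # the expected data, only the leader health-checks data instances, while
    # followers merely mirror cluster membership.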
    instance_3_cursor = connect(host="localhost", port=7689).cursor()

    def show_replicas():
        return sorted(list(execute_and_fetch_all(instance_3_cursor, "SHOW REPLICAS;")))

    replicas = [
        (
            "instance_1",
            "127.0.0.1:10001",
            "sync",
            {"behind": None, "status": "ready", "ts": 0},
            {"memgraph": {"behind": 0, "status": "ready", "ts": 0}},
        ),
        (
            "instance_2",
            "127.0.0.1:10002",
            "sync",
            {"behind": None, "status": "ready", "ts": 0},
            {"memgraph": {"behind": 0, "status": "ready", "ts": 0}},
        ),
    ]
    mg_sleep_and_assert_collection(replicas, show_replicas)
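    # SHOW REPLICAS runs on the current MAIN (instance_3): both replicas should be
    # registered as sync, ready, and fully caught up (ts 0, nothing behind).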
    def get_vertex_count_func(cursor):
        def get_vertex_count():
            return execute_and_fetch_all(cursor, "MATCH (n) RETURN count(n)")[0][0]

        return get_vertex_count

    vertex_count = 0
    instance_1_cursor = connect(port=7687, host="localhost").cursor()
    instance_2_cursor = connect(port=7688, host="localhost").cursor()

    mg_sleep_and_assert(vertex_count, get_vertex_count_func(instance_1_cursor))
    mg_sleep_and_assert(vertex_count, get_vertex_count_func(instance_2_cursor))
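    # Nothing has been written yet, so both replicas should report an empty graph
    # (vertex count 0); this only verifies that the replication wiring is live.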
    # 3
    with pytest.raises(Exception) as e:
        execute_and_fetch_all(
            coord_cursor_3,
            "REGISTER INSTANCE instance_4 WITH CONFIG {'bolt_server': '127.0.0.1:7680', 'management_server': '127.0.0.1:10050', 'replication_server': '127.0.0.1:10051'};",
        )

    # 4: the failed registration triggers force reset and the choosing of a new
    # instance as MAIN.
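    # Per the expected views below, force reset promotes instance_1 to MAIN, while
    # the old MAIN (instance_3) is reported down by the leader and its role
    # becomes unknown.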
    leader_data = [
        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
        ("instance_1", "", "127.0.0.1:10011", "up", "main"),
        ("instance_2", "", "127.0.0.1:10012", "up", "replica"),
        ("instance_3", "", "127.0.0.1:10013", "down", "unknown"),
    ]
    follower_data = [
        ("coordinator_1", "127.0.0.1:10111", "", "unknown", "coordinator"),
        ("coordinator_2", "127.0.0.1:10112", "", "unknown", "coordinator"),
        ("coordinator_3", "127.0.0.1:10113", "", "unknown", "coordinator"),
        ("instance_1", "", "", "unknown", "main"),
        ("instance_2", "", "", "unknown", "replica"),
        ("instance_3", "", "", "unknown", "unknown"),
    ]

    # 5
    mg_sleep_and_assert(leader_data, show_instances_coord3)
    mg_sleep_and_assert(follower_data, show_instances_coord1)
    mg_sleep_and_assert(follower_data, show_instances_coord2)


if __name__ == "__main__":
    # Focused run of this test; while this line is present, the general run
    # below is unreachable.
    sys.exit(pytest.main([__file__, "-k", "test_force_reset_works_after_failed_registration", "-vv"]))
    sys.exit(pytest.main([__file__, "-rA"]))