Skip to content

Commit d1112c9

Browse files
committed
AdminAPI test fixes:
Added check_sandbox_in_metadata() to the available test functions to wait for an instance that was recently added to a cluster to be present in its own metadata schema: there may be some replication delay, since the instance is added to the metadata on the primary. Included the usage of check_sandbox_in_metadata() in the dba_cluster_add_instance and super_read_only_handling tests. Those tests were failing due to the assumption that if an instance has the "ONLINE" state it already has all transactions applied and therefore has the most recent version of the metadata schema. This may not always be true due to the slowness of the system running the tests. Added a connection attempt in try_restart_sandbox() to verify that the instance is up and running and accepting connections before returning. Replaced the use of killSandboxInstance() with stopSandboxInstance() in several tests to ensure the instance is completely dead before restarting it, so that the restart does not fail. Also made use of try_restart_sandbox() instead of startSandboxInstance().
1 parent 24fffed commit d1112c9

17 files changed

+125
-85
lines changed

unittest/scripts/js_devapi/scripts/dba_cluster_add_instance.js

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@ add_instance_to_cluster(single, __mysql_sandbox_port2);
1717
// Waiting for the second added instance to become online
1818
wait_slave_state(single, uri2, "ONLINE");
1919

20+
// Wait for the second added instance to fetch all the replication data
21+
wait_sandbox_in_metadata(__mysql_sandbox_port2);
22+
2023
// Connect to the future new seed node
2124
shell.connect({scheme: 'mysql', host: localhost, port: __mysql_sandbox_port2, user: 'root', password: 'root'});
2225
var singleSession2 = session;
@@ -34,7 +37,7 @@ if (__sandbox_dir)
3437
else
3538
dba.killSandboxInstance(__mysql_sandbox_port1);
3639

37-
wait_slave_state(single, uri1, ["UNREACHABLE", "OFFLINE"]);
40+
wait_slave_state(single, uri1, ["(MISSING)"]);
3841

3942
//@ Restore the quorum
4043
single.forceQuorumUsingPartitionOf({host: localhost, port: __mysql_sandbox_port2, user: 'root', password:'root'});

unittest/scripts/js_devapi/scripts/dba_cluster_interactive.js

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -119,16 +119,14 @@ if (__sandbox_dir)
119119
else
120120
dba.killSandboxInstance(__mysql_sandbox_port3);
121121

122-
wait_slave_state(Cluster, 'third_sandbox', ["UNREACHABLE", "OFFLINE"]);
122+
wait_slave_state(Cluster, 'third_sandbox', ["(MISSING)"]);
123123

124124
//@# Dba: start instance 3
125125
if (__sandbox_dir)
126126
dba.startSandboxInstance(__mysql_sandbox_port3, {sandboxDir: __sandbox_dir});
127127
else
128128
dba.startSandboxInstance(__mysql_sandbox_port3);
129129

130-
wait_slave_state(Cluster, 'third_sandbox', ["OFFLINE", "(MISSING)"]);
131-
132130
//@: Cluster: rejoinInstance errors
133131
Cluster.rejoinInstance();
134132
Cluster.rejoinInstance(1,2,3);

unittest/scripts/js_devapi/scripts/dba_cluster_multimaster_interactive.js

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -72,22 +72,17 @@ Cluster.status()
7272

7373
// Rejoin tests
7474

75-
//@# Dba: kill instance 3
75+
//@# Dba: stop instance 3
76+
// Use stop sandbox instance to make sure the instance is gone before restarting it
7677
if (__sandbox_dir)
77-
dba.killSandboxInstance(__mysql_sandbox_port3, {sandboxDir:__sandbox_dir});
78+
dba.stopSandboxInstance(__mysql_sandbox_port3, {sandboxDir:__sandbox_dir, password: 'root'});
7879
else
79-
dba.killSandboxInstance(__mysql_sandbox_port3);
80+
dba.stopSandboxInstance(__mysql_sandbox_port3, {password: 'root'});
8081

81-
// XCOM needs time to kick out the member of the group. The GR team has a patch to fix this
82-
// But won't be available for the GA release. So we need to wait until the instance is reported
83-
// as offline
84-
wait_slave_state(Cluster, uri3, ["OFFLINE", "UNREACHABLE"]);
82+
wait_slave_state(Cluster, uri3, ["(MISSING)"]);
8583

86-
//@# Dba: start instance 3
87-
if (__sandbox_dir)
88-
dba.startSandboxInstance(__mysql_sandbox_port3, {sandboxDir: __sandbox_dir});
89-
else
90-
dba.startSandboxInstance(__mysql_sandbox_port3);
84+
// start instance 3
85+
try_restart_sandbox(__mysql_sandbox_port3);
9186

9287
//@: Cluster: rejoinInstance errors
9388
Cluster.rejoinInstance();
@@ -112,4 +107,6 @@ Cluster.status();
112107
Cluster.dissolve({force: true})
113108

114109
// Disable super-read-only (BUG#26422638)
115-
session.runSql("SET GLOBAL SUPER_READ_ONLY = 0;")
110+
shell.connect({scheme: 'mysql', host: localhost, port: __mysql_sandbox_port1, user: 'root', password: 'root'});
111+
session.runSql("SET GLOBAL SUPER_READ_ONLY = 0;");
112+
session.close();

unittest/scripts/js_devapi/scripts/dba_cluster_multimaster_no_interactive.js

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -66,22 +66,17 @@ Cluster.status();
6666

6767
// Rejoin tests
6868

69-
//@# Dba: kill instance 3
69+
//@# Dba: stop instance 3
70+
// Use stop sandbox instance to make sure the instance is gone before restarting it
7071
if (__sandbox_dir)
71-
dba.killSandboxInstance(__mysql_sandbox_port3, {sandboxDir:__sandbox_dir});
72+
dba.stopSandboxInstance(__mysql_sandbox_port3, {sandboxDir:__sandbox_dir, password: 'root'});
7273
else
73-
dba.killSandboxInstance(__mysql_sandbox_port3);
74+
dba.stopSandboxInstance(__mysql_sandbox_port3, {password: 'root'});
7475

75-
// XCOM needs time to kick out the member of the group. The GR team has a patch to fix this
76-
// But won't be available for the GA release. So we need to wait until the instance is reported
77-
// as offline
78-
wait_slave_state(Cluster, uri3, ["OFFLINE", "UNREACHABLE"]);
76+
wait_slave_state(Cluster, uri3, ["(MISSING)"]);
7977

80-
//@# Dba: start instance 3
81-
if (__sandbox_dir)
82-
dba.startSandboxInstance(__mysql_sandbox_port3, {sandboxDir: __sandbox_dir});
83-
else
84-
dba.startSandboxInstance(__mysql_sandbox_port3);
78+
// start instance 3
79+
try_restart_sandbox(__mysql_sandbox_port3);
8580

8681
//@ Cluster: rejoinInstance errors
8782
Cluster.rejoinInstance();

unittest/scripts/js_devapi/scripts/dba_cluster_rejoin_instance.js

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -47,20 +47,18 @@ cluster.addInstance({dbUser: 'foo', host: 'localhost', port:__mysql_sandbox_port
4747
// Waiting for the instance 3 to become online
4848
wait_slave_state(cluster, uri3, "ONLINE");
4949

50-
// kill instance 2
50+
// stop instance 2
51+
// Use stop sandbox instance to make sure the instance is gone before restarting it
5152
if (__sandbox_dir)
52-
dba.killSandboxInstance(__mysql_sandbox_port2, {sandboxDir:__sandbox_dir});
53+
dba.stopSandboxInstance(__mysql_sandbox_port2, {sandboxDir:__sandbox_dir, password: 'root'});
5354
else
54-
dba.killSandboxInstance(__mysql_sandbox_port2);
55+
dba.stopSandboxInstance(__mysql_sandbox_port2, {password: 'root'});
5556

5657
// Waiting for instance 2 to become missing
5758
wait_slave_state(cluster, uri2, "(MISSING)");
5859

5960
// Start instance 2
60-
if (__sandbox_dir)
61-
dba.startSandboxInstance(__mysql_sandbox_port2, {sandboxDir:__sandbox_dir});
62-
else
63-
dba.startSandboxInstance(__mysql_sandbox_port2);
61+
try_restart_sandbox(__mysql_sandbox_port2);
6462

6563
//@<OUT> Cluster status
6664
cluster.status()

unittest/scripts/js_devapi/scripts/dba_super_read_only_handling.js

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,10 +48,16 @@ var cluster = dba.createCluster('sample');
4848
cluster.addInstance(connection2);
4949
wait_slave_state(cluster, uri2, "ONLINE");
5050

51+
// Wait for the second added instance to fetch all the replication data
52+
wait_sandbox_in_metadata(__mysql_sandbox_port2);
53+
5154
//@<OUT> Adds other instance
5255
cluster.addInstance(connection3);
5356
wait_slave_state(cluster, uri3, "ONLINE");
5457

58+
// Wait for the third added instance to fetch all the replication data
59+
wait_sandbox_in_metadata(__mysql_sandbox_port3);
60+
5561
//@<OUT> Rejoins an instance
5662
stop_sandbox(__mysql_sandbox_port3);
5763
wait_slave_state(cluster, uri3, "(MISSING)");

unittest/scripts/js_devapi/setup/setup.js

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,6 +149,34 @@ function wait_slave_state(cluster, slave_uri, states) {
149149
recov_cluster = null;
150150
}
151151

152+
function wait_sandbox(timeout, wait_interval, condition, sandbox_port){
153+
waiting = 0;
154+
res = condition([sandbox_port]);
155+
while(!res && waiting < timeout) {
156+
os.sleep(wait_interval);
157+
waiting = waiting + 1;
158+
res = condition([sandbox_port]);
159+
}
160+
return res;
161+
}
162+
163+
function check_sandbox_in_metadata(instance_port) {
164+
var sandbox_count_metadata =
165+
session.runSql("select count(*) from mysql_innodb_cluster_metadata.instances where instance_name = 'localhost:" + instance_port + "'").fetchOne()[0];
166+
167+
println("---> count(*) sandbox in metadata = " + sandbox_count_metadata);
168+
169+
return sandbox_count_metadata == "1";
170+
}
171+
172+
function wait_sandbox_in_metadata(instance_port) {
173+
var connected = connect_to_sandbox([instance_port]);
174+
if (connected) {
175+
wait_sandbox(60, 1, check_sandbox_in_metadata, instance_port);
176+
session.close();
177+
}
178+
}
179+
152180
function connect_to_sandbox(params) {
153181
var port = params[0];
154182
var connected = false;
@@ -403,6 +431,11 @@ function try_restart_sandbox(port) {
403431
try {
404432
dba.startSandboxInstance(port, options);
405433

434+
// Try to establish a connection to the instance to make sure
435+
// it's up and running.
436+
shell.connect({scheme: 'mysql', host: localhost, port: port, user: 'root', password: 'root'});
437+
session.close();
438+
406439
println(' succeeded');
407440
return true;
408441
} catch (err) {

unittest/scripts/js_devapi/validation/dba_cluster_multimaster_interactive.js

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -235,10 +235,7 @@ The instance 'root@localhost:<<<__mysql_sandbox_port3>>>' was successfully added
235235
}
236236
}
237237

238-
//@# Dba: kill instance 3
239-
||
240-
241-
//@# Dba: start instance 3
238+
//@# Dba: stop instance 3
242239
||
243240

244241
//@: Cluster: rejoinInstance errors

unittest/scripts/js_devapi/validation/dba_cluster_multimaster_no_interactive.js

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -166,10 +166,7 @@
166166
}
167167
}
168168

169-
//@# Dba: kill instance 3
170-
||
171-
172-
//@# Dba: start instance 3
169+
//@# Dba: stop instance 3
173170
||
174171

175172
//@ Cluster: rejoinInstance errors

unittest/scripts/py_devapi/scripts/dba_cluster_add_instance.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@
1717
# Waiting for the second added instance to become online
1818
wait_slave_state(single, uri2, "ONLINE")
1919

20+
# Wait for the second added instance to fetch all the replication data
21+
wait_sandbox_in_metadata(__mysql_sandbox_port2)
22+
2023
# Connect to the future new seed node
2124
shell.connect({'scheme': 'mysql', 'host': localhost, 'port': __mysql_sandbox_port2, 'user': 'root', 'password': 'root'})
2225
single_session2 = session
@@ -30,7 +33,7 @@
3033
else:
3134
dba.kill_sandbox_instance(__mysql_sandbox_port1)
3235

33-
wait_slave_state(single, uri1, ["UNREACHABLE", "OFFLINE"])
36+
wait_slave_state(single, uri1, ["(MISSING)"])
3437

3538
#@ Restore the quorum
3639
single.force_quorum_using_partition_of({'host': localhost, 'port': __mysql_sandbox_port2, 'user': 'root', 'password':'root'})

unittest/scripts/py_devapi/scripts/dba_cluster_interactive.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@
123123
else:
124124
dba.kill_sandbox_instance(__mysql_sandbox_port3)
125125

126-
wait_slave_state(cluster, 'third_sandbox', ["UNREACHABLE", "OFFLINE"])
126+
wait_slave_state(cluster, 'third_sandbox', ["(MISSING)"])
127127

128128
#@# Dba: start instance 3
129129
if __sandbox_dir:

unittest/scripts/py_devapi/scripts/dba_cluster_multimaster_interactive.py

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -73,22 +73,16 @@
7373

7474
# Rejoin tests
7575

76-
#@# Dba: kill instance 3
76+
#@# Dba: stop instance 3
7777
if __sandbox_dir:
78-
dba.kill_sandbox_instance(__mysql_sandbox_port3, {"sandboxDir":__sandbox_dir})
78+
dba.stop_sandbox_instance(__mysql_sandbox_port3, {'sandboxDir': __sandbox_dir, 'password': 'root'})
7979
else:
80-
dba.kill_sandbox_instance(__mysql_sandbox_port3)
80+
dba.stop_sandbox_instance(__mysql_sandbox_port3, {'password': 'root'})
8181

82-
# XCOM needs time to kick out the member of the group. The GR team has a patch to fix this
83-
# But won't be available for the GA release. So we need to wait until the instance is reported
84-
# as offline
85-
wait_slave_state(cluster, uri3, ["OFFLINE", "UNREACHABLE"])
82+
wait_slave_state(cluster, uri3, ["(MISSING)"])
8683

87-
#@# Dba: start instance 3
88-
if __sandbox_dir:
89-
dba.start_sandbox_instance(__mysql_sandbox_port3, {"sandboxDir": __sandbox_dir})
90-
else:
91-
dba.start_sandbox_instance(__mysql_sandbox_port3)
84+
# start instance 3
85+
try_restart_sandbox(__mysql_sandbox_port3)
9286

9387
#@: Cluster: rejoin_instance errors
9488
cluster.rejoin_instance()
@@ -111,3 +105,8 @@
111105
cluster.status()
112106

113107
cluster.dissolve({'force': True})
108+
109+
# Disable super-read-only (BUG#26422638)
110+
shell.connect({'scheme': 'mysql', 'host': localhost, 'port': __mysql_sandbox_port1, 'user': 'root', 'password': 'root'})
111+
session.run_sql("SET GLOBAL SUPER_READ_ONLY = 0;")
112+
session.close()

unittest/scripts/py_devapi/scripts/dba_cluster_multimaster_no_interactive.py

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -65,22 +65,16 @@
6565

6666
# Rejoin tests
6767

68-
#@# Dba: kill instance 3
68+
#@# Dba: stop instance 3
6969
if __sandbox_dir:
70-
dba.kill_sandbox_instance(__mysql_sandbox_port3, {'sandboxDir': __sandbox_dir})
70+
dba.stop_sandbox_instance(__mysql_sandbox_port3, {'sandboxDir': __sandbox_dir, 'password': 'root'})
7171
else:
72-
dba.kill_sandbox_instance(__mysql_sandbox_port3)
72+
dba.stop_sandbox_instance(__mysql_sandbox_port3, {'password': 'root'})
7373

74-
# XCOM needs time to kick out the member of the group. The GR team has a patch to fix this
75-
# But won't be available for the GA release. So we need to wait until the instance is reported
76-
# as offline
77-
wait_slave_state(cluster, uri3, ["OFFLINE", "UNREACHABLE"])
74+
wait_slave_state(cluster, uri3, ["(MISSING)"])
7875

79-
#@# Dba: start instance 3
80-
if __sandbox_dir:
81-
dba.start_sandbox_instance(__mysql_sandbox_port3, {'sandboxDir': __sandbox_dir})
82-
else:
83-
dba.start_sandbox_instance(__mysql_sandbox_port3)
76+
# start instance 3
77+
try_restart_sandbox(__mysql_sandbox_port3)
8478

8579
#@ Cluster: rejoin_instance errors
8680
cluster.rejoin_instance()

unittest/scripts/py_devapi/scripts/dba_cluster_rejoin_instance.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -47,20 +47,18 @@
4747
# Waiting for the instance 3 to become online
4848
wait_slave_state(cluster, uri3, "ONLINE")
4949

50-
# kill instance 2
50+
# stop instance 2
51+
# Use stop sandbox instance to make sure the instance is gone before restarting it
5152
if __sandbox_dir:
52-
dba.kill_sandbox_instance(__mysql_sandbox_port2, {'sandboxDir':__sandbox_dir})
53+
dba.stop_sandbox_instance(__mysql_sandbox_port2, {'sandboxDir':__sandbox_dir, 'password': 'root'})
5354
else:
54-
dba.kill_sandbox_instance(__mysql_sandbox_port2)
55+
dba.stop_sandbox_instance(__mysql_sandbox_port2, {'password': 'root'})
5556

5657
# Waiting for instance 2 to become missing
5758
wait_slave_state(cluster, uri2, "(MISSING)")
5859

5960
# Start instance 2
60-
if __sandbox_dir:
61-
dba.start_sandbox_instance(__mysql_sandbox_port2, {'sandboxDir':__sandbox_dir})
62-
else:
63-
dba.start_sandbox_instance(__mysql_sandbox_port2)
61+
try_restart_sandbox(__mysql_sandbox_port2)
6462

6563
#@<OUT> Cluster status
6664
cluster.status()

unittest/scripts/py_devapi/setup/setup.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,28 @@ def wait_slave_state(cluster, slave_uri, states):
148148

149149
recov_cluster = None
150150

151+
def wait_sandbox(timeout, wait_interval, condition, sandbox_port):
152+
waiting = 0
153+
res = condition([sandbox_port])
154+
while not res and waiting < timeout:
155+
time.sleep(wait_interval)
156+
waiting = waiting + 1
157+
res = condition([sandbox_port])
158+
return res
159+
160+
def check_sandbox_in_metadata(instance_port):
161+
sandbox_count_metadata = session.run_sql("select count(*) from mysql_innodb_cluster_metadata.instances where instance_name = 'localhost:{0}'".format(instance_port)).fetch_one()[0]
162+
163+
print "---> count(*) sandbox in metadata = %s" % sandbox_count_metadata
164+
165+
return sandbox_count_metadata == "1"
166+
167+
def wait_sandbox_in_metadata(instance_port):
168+
connected = connect_to_sandbox([instance_port]);
169+
if (connected):
170+
wait_sandbox(60, 1, check_sandbox_in_metadata, instance_port)
171+
session.close()
172+
151173
# Smart deployment routines
152174

153175
def connect_to_sandbox(params):
@@ -379,6 +401,12 @@ def try_restart_sandbox(port):
379401
def try_start():
380402
try:
381403
dba.start_sandbox_instance(port, options)
404+
405+
# Try to establish a connection to the instance to make sure
406+
# it's up and running.
407+
shell.connect({'host': localhost, 'port': port, 'user': 'root', 'password': 'root'})
408+
session.close();
409+
382410
print "succeeded"
383411
return True
384412
except Exception, err:

unittest/scripts/py_devapi/validation/dba_cluster_multimaster_interactive.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -235,10 +235,7 @@
235235
}
236236
}
237237

238-
#@# Dba: kill instance 3
239-
||
240-
241-
#@# Dba: start instance 3
238+
#@# Dba: stop instance 3
242239
||
243240

244241
#@: Cluster: rejoin_instance errors

0 commit comments

Comments
 (0)