Commit e8165aca authored by unknown

increase save_master_pos timeout to 30 seconds for higher test predictability

    better replication test cleanup to ensure better test predictability
+ some ndb test cleanup


client/mysqltest.c:
  increase save_master_pos timeout to 30 seconds for higher test predictability
mysql-test/extra/rpl_tests/rpl_ddl.test:
  better replication test cleanup to ensure better test predictability
mysql-test/extra/rpl_tests/rpl_row_UUID.test:
  better replication test cleanup to ensure better test predictability
mysql-test/r/rpl_ndb_dd_advance.result:
  better replication test cleanup to ensure better test predictability
mysql-test/r/rpl_ndb_sync.result:
  better replication test cleanup to ensure better test predictability
mysql-test/t/disabled.def:
  better replication test cleanup to ensure better test predictability
mysql-test/t/rpl_ndb_bank.test:
  better replication test cleanup to ensure better test predictability
mysql-test/t/rpl_ndb_basic.test:
  better replication test cleanup to ensure better test predictability
mysql-test/t/rpl_ndb_dd_advance.test:
  better replication test cleanup to ensure better test predictability
mysql-test/t/rpl_ndb_dd_basic.test:
  better replication test cleanup to ensure better test predictability
mysql-test/t/rpl_ndb_sync.test:
  better replication test cleanup to ensure better test predictability
parent 5dbd32e4
......@@ -1596,9 +1596,9 @@ int do_sync_with_master2(long offset)
It may be that the slave SQL thread has not started yet, though START
SLAVE has been issued ?
*/
if (tries++ == 3)
if (tries++ == 30)
die("could not sync with master ('%s' returned NULL)", query_buf);
sleep(1); /* So at most we will wait 3 seconds and make 4 tries */
sleep(1); /* So at most we will wait 30 seconds and make 31 tries */
mysql_free_result(res);
goto wait_for_position;
}
......@@ -1664,14 +1664,14 @@ int do_save_master_pos()
{
ulonglong epoch=0, tmp_epoch= 0;
int count= 0;
do
int do_continue= 1;
while (do_continue)
{
const char binlog[]= "binlog";
const char latest_trans_epoch[]=
"latest_trans_epoch=";
const char latest_applied_binlog_epoch[]=
"latest_applied_binlog_epoch=";
const char latest_handled_binlog_epoch[]=
"latest_handled_binlog_epoch=";
if (count)
sleep(1);
if (mysql_query(mysql, query= "show engine ndb status"))
......@@ -1701,26 +1701,32 @@ int do_save_master_pos()
start_lineno, latest_trans_epoch, query);
}
/* latest_applied_binlog_epoch */
while (*status && strncmp(status, latest_applied_binlog_epoch,
sizeof(latest_applied_binlog_epoch)-1))
while (*status && strncmp(status, latest_handled_binlog_epoch,
sizeof(latest_handled_binlog_epoch)-1))
status++;
if (*status)
{
status+= sizeof(latest_applied_binlog_epoch)-1;
status+= sizeof(latest_handled_binlog_epoch)-1;
tmp_epoch= strtoull(status, (char**) 0, 10);
}
else
die("line %u: result does not contain '%s' in '%s'",
start_lineno, latest_applied_binlog_epoch, query);
start_lineno, latest_handled_binlog_epoch, query);
break;
}
}
mysql_free_result(res);
if (!row)
die("line %u: result does not contain '%s' in '%s'",
start_lineno, binlog, query);
count++;
} while (tmp_epoch < epoch && count <= 3);
if (tmp_epoch >= epoch)
do_continue= 0;
else if (count > 30)
{
break;
}
mysql_free_result(res);
}
}
}
#endif
......
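The commands whose retry limit this changes are mysqltest's save_master_pos and sync_with_master (and the combined sync_slave_with_master), which replication tests use to wait until the slave has caught up with the master. A minimal usage sketch of that pattern, using a hypothetical table t1, is shown below; after this change mysqltest polls for up to 30 seconds instead of 3 before giving up.

  # hypothetical snippet illustrating the commands whose timeout was raised
  --connection master
  INSERT INTO t1 VALUES (1);
  # record the current master binlog file/position (and, with NDB, wait for
  # the binlog epoch to be handled; that wait loop is what changed above)
  save_master_pos;
  --connection slave
  # poll until the slave has applied up to the saved position
  sync_with_master;
  SELECT * FROM t1;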
......@@ -507,4 +507,5 @@ DROP DATABASE IF EXISTS mysqltest2;
DROP DATABASE IF EXISTS mysqltest3;
--enable_warnings
-- source include/master-slave-end.inc
......@@ -80,3 +80,4 @@ DROP TABLE test.t2;
# be removed at next testsuite run.
# End of 5.0 test case
-- source include/master-slave-end.inc
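Both of the extra/rpl_tests files touched here now end by sourcing include/master-slave-end.inc, the shared end-of-test cleanup file. Its contents are not part of this commit; judging from the sync_slave_with_master calls it replaces elsewhere in this changeset, it presumably amounts to something like the following.

  # assumed contents of include/master-slave-end.inc (not shown in this commit):
  # make sure the slave has applied all of the test's cleanup statements
  # before the framework checks for leftover objects
  connection master;
  sync_slave_with_master;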
......@@ -370,13 +370,10 @@ COUNT(*)
10000
***** Add some more records to master *********
***** Finsh the slave sync process *******
* 1. *
@the_epoch:=MAX(epoch)
<the_epoch>
* 2. *
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
<the_pos> master-bin.000001
* 3. *
* 4. *
* 5. *
START SLAVE;
......
......@@ -25,13 +25,13 @@ hex(c2) hex(c3) c1
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT)ENGINE=HEAP;
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE cluster.backup_info;
DROP TABLE test.backup_info;
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
......
......@@ -17,15 +17,15 @@ ndb_autodiscover2 : Needs to be fixed w.r.t binlog
#ndb_cache2 : Bug #18597 simultaneous drop table and ndb statistics update triggers node failure
#ndb_gis : Bug #18600 ndb_gis test failure
#ndb_load : Bug#17233 failed load data from infile causes mysqld dbug_assert, binlog not flushed
#partition_03ndb : Bug#16385 Partitions: crash when updating a range partitioned NDB table
partition_03ndb : Bug#16385 Partitions: crash when updating a range partitioned NDB table
ps_7ndb : dbug assert in RBR mode when executing test suite
#rpl_ndb_2innodb : assertion in get_parts_for_update()
#rpl_ndb_2myisam : assertion in get_parts_for_update()
rpl_ndb_2innodb : Bug #19004 ndb: partition by range and update hangs (note, test must also be updated to have primary key with blob table)
rpl_ndb_2myisam : Bug #19004 ndb: partition by range and update hangs (note, test must also be updated to have primary key with blob table)
rpl_ndb_auto_inc : Bug#17086 CR: auto_increment_increment and auto_increment_offset produce duplicate key er
#rpl_ndb_basic : Bug #18592 rpl_ndb_basic failure
#rpl_ndb_dd_advance : Bug #18924 rpl_ndb_dd_advance failure
#rpl_ndb_dd_basic : Bug #18569 rpl_ndb_dd_basic failure
#rpl_ndb_ddl : master hangs
rpl_ndb_ddl : result file needs update + test needs to checked
#rpl_ndb_insert_ignore : Bug #18567 rpl_ndb_insert_ignore failure
#rpl_ndb_innodb2ndb : Bug#18261: Cluster Replication: tests rpl_ndb_xxx2ndb fails
#rpl_ndb_log : result not deterministic, TBD if should remove
......
......@@ -132,34 +132,7 @@ let the_backup_id=`select @the_backup_id`;
#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog postition from master
# 3. change master on slave
# 4. start the replication
# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
--let $the_epoch= `select @the_epoch`
# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`
# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos;
# 4.
--source include/ndb_setup_slave.inc
--connection slave
START SLAVE;
......
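The inline steps removed above (1: read the last applied epoch from cluster.apply_status on the slave, 2: look up the next binlog file and position in cluster.binlog_index on the master, 3: CHANGE MASTER on the slave) are consolidated into include/ndb_setup_slave.inc; step 4, START SLAVE, stays in the caller. The include file itself is not shown in this commit, but reconstructed from the removed lines it presumably looks roughly like this.

  # assumed shape of include/ndb_setup_slave.inc, based on the inline code it replaces
  --connection slave
  --replace_column 1 <the_epoch>
  SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
  --let $the_epoch= `select @the_epoch`
  --connection master
  --replace_result $the_epoch <the_epoch>
  --replace_column 1 <the_pos>
  eval SELECT @the_pos:=Position, @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
  FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
  --let $the_pos= `SELECT @the_pos`
  --let $the_file= `SELECT @the_file`
  --connection slave
  --replace_result $the_pos <the_pos>
  eval CHANGE MASTER TO
    master_log_file = '$the_file',
    master_log_pos = $the_pos;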
......@@ -146,4 +146,4 @@ select * from t1 order by nid;
# cleanup
--connection master
DROP TABLE t1;
sync_slave_with_master;
-- source include/master-slave-end.inc
......@@ -479,29 +479,9 @@ while ($j)
# 5. start the replication
--echo ***** Finsh the slave sync process *******
# 1.
--echo * 1. *
connection slave;
--disable_query_log
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
--let $the_epoch= `select @the_epoch`
# 2.
--echo * 2. *
connection master;
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`
# 3.
--echo * 3. *
connection slave;
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos;
# 1. 2. 3.
--source include/ndb_setup_slave.inc
--enable_query_log
# 4.
......
......@@ -82,4 +82,4 @@ drop datafile 'datafile02.dat'
engine=ndb;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--sync_slave_with_master
-- source include/master-slave-end.inc
......@@ -8,13 +8,13 @@
#
# stop the save
connection slave;
--connection slave
STOP SLAVE;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
# get some data on the master
connection master;
--connection master
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ;
......@@ -25,21 +25,14 @@ SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
# take a backup on master
--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT)ENGINE=HEAP;
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
let the_backup_id=`select @the_backup_id` ;
DROP TABLE cluster.backup_info;
--source include/ndb_backup.inc
# update a row
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
# restore on slave, first check that nothing is there
connection slave
--connection slave
# we should have no tables
SHOW TABLES;
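Similarly, the inline backup bookkeeping removed above is consolidated into include/ndb_backup.inc. Its exact contents are not shown here either, but from the removed lines and the updated rpl_ndb_sync.result (which now reports test.backup_info instead of cluster.backup_info) it presumably does roughly the following.

  # assumed shape of include/ndb_backup.inc, based on the inline code it replaces
  --exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
  --exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
  CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
  DELETE FROM test.backup_info;
  LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
  --replace_column 1 <the_backup_id>
  SELECT @the_backup_id:=backup_id FROM test.backup_info;
  let the_backup_id=`select @the_backup_id`;
  DROP TABLE test.backup_info;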
......@@ -66,40 +59,16 @@ SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog postition from master
# 3. change master on slave
# 1.
connection slave;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
let $the_epoch= `select @the_epoch` ;
# 2.
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;
# 3.
connection slave;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;
--source include/ndb_setup_slave.inc
--connection slave
START SLAVE;
#
#
#
connection master;
#sync_slave_with_master;
--sleep 2
connection slave;
--connection master
--sync_slave_with_master
--connection slave
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
......@@ -111,22 +80,21 @@ SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
# Cleanup
#
connection master;
--connection master
DROP DATABASE ndbsynctest;
#sync_slave_with_master;
--sleep 2
connection slave;
--sync_slave_with_master
--connection slave
STOP SLAVE;
#
# Test some replication commands
#
connection master;
--connection master
reset master;
# should now contain nothing
select * from cluster.binlog_index;
connection slave;
--connection slave
reset slave;
# should now contain nothing
select * from cluster.apply_status;
......