Commit d328705a authored by Jan Lindström, committed by Julius Goryavsky

MDEV-34170 : table gtid_slave_pos entries never been deleted with wsrep_gtid_mode = 0

The problem was that updates to the mysql.gtid_slave_pos table were
replicated even when they were never used, and because of that
they were never deleted. Avoid replication of the mysql.gtid_slave_pos
table if wsrep_gtid_mode=OFF.
Signed-off-by: Julius Goryavsky <julius.goryavsky@mariadb.com>
parent a02773f7
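For orientation before reading the diff: the core of the fix is the additional wsrep_gtid_mode check in the wsrep guards of rpl_gtid.cc. The following is a minimal sketch of the updated guard in rpl_slave_state::record_gtid, reconstructed only from the hunk further down; everything outside the visible diff lines is elided and marked as such.

#ifdef WITH_WSREP
  /*
    We should replicate local gtid_slave_pos updates to other nodes if
    wsrep gtid mode is set.
    In applier we should not append them to galera writeset.
  */
  if (WSREP_ON_ && wsrep_gtid_mode && wsrep_thd_is_local(thd))
  {
    thd->wsrep_ignore_table= false;
    table->file->row_logging= 1; // replication requires binary logging
    /* ... rest of the branch and function is outside this hunk ... */
  }
#endif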
......@@ -22,12 +22,6 @@ EXPECT_1
1
gtid_binlog_state_equal
0
connection node_2;
SELECT COUNT(*) AS EXPECT_1 FROM t1;
EXPECT_1
1
gtid_binlog_state_equal
0
#cleanup
connection node_3;
DROP TABLE t1;
......
connection node_2;
connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
create user repl@'%' identified by 'repl';
grant all on *.* to repl@'%';
flush privileges;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
connection node_1;
connection node_2;
connection node_2;
START SLAVE;
connection node_3;
CREATE TABLE t1 (id bigint primary key, msg varchar(100)) engine=innodb;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_2;
SELECT COUNT(*) > 0 AS EXPECT_1 FROM mysql.gtid_slave_pos;
EXPECT_1
1
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_1;
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_2;
# Verify that graceful shutdown succeeds.
# Force SST
connection node_1;
# Waiting until node_2 is not part of cluster anymore
connection node_2;
# Start node_2 again
# Wait until node_2 is back on cluster
connection node_2;
call mtr.add_suppression("Slave: Operation CREATE USER failed for .*");
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_1;
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_3;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
EXPECT_10000
10000
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
connection node_3;
RESET MASTER;
drop table t1;
connection node_2;
DROP TABLE t1;
connection node_1;
connection node_1;
disconnect node_3;
disconnect node_2;
disconnect node_1;
# End of test
......@@ -46,18 +46,8 @@ SELECT LENGTH(@@global.gtid_binlog_state) > 1;
SELECT COUNT(*) AS EXPECT_1 FROM t1;
--disable_query_log
--eval SELECT '$gtid_binlog_state_node1' = @@global.gtid_binlog_state AS gtid_binlog_state_equal;
--enable_query_log
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
--source include/wait_condition.inc
SELECT COUNT(*) AS EXPECT_1 FROM t1;
# Note that MyISAM tables are not replicated by Galera, so we do not
# check node_2 here
--disable_query_log
--eval SELECT '$gtid_binlog_state_node1' = @@global.gtid_binlog_state AS gtid_binlog_state_equal;
......
!include ../galera_2nodes_as_slave.cnf
[mysqld]
wsrep-debug=1
server_id=15
wsrep_gtid_mode=OFF
wsrep_gtid_domain_id=16
gtid_domain_id=11
gtid_strict_mode=OFF
#
# Test Galera as a replica of MySQL async replication
#
# The galera/galera_2nodes_as_slave.cnf describes the setup of the nodes
#
--source include/force_restart.inc
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_sequence.inc
# As node #3 is not a Galera node and galera_cluster.inc does not open a connection
# to it, we open the node_3 connection here
--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
create user repl@'%' identified by 'repl';
grant all on *.* to repl@'%';
flush privileges;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
--let $node_1 = node_1
--let $node_2 = node_2
--source include/auto_increment_offset_save.inc
--connection node_2
--disable_query_log
--eval CHANGE MASTER TO master_host='127.0.0.1', master_user='repl', master_password='repl', master_port=$NODE_MYPORT_3, master_use_gtid=slave_pos;
--enable_query_log
START SLAVE;
--connection node_3
CREATE TABLE t1 (id bigint primary key, msg varchar(100)) engine=innodb;
--disable_query_log
INSERT INTO t1 SELECT seq, 'test' from seq_1_to_10000;
--enable_query_log
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 10000 FROM t1;
--source include/wait_condition.inc
#
# Node_2 is a slave, so the mysql.gtid_slave_pos table is updated there
#
SELECT COUNT(*) > 0 AS EXPECT_1 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 10000 FROM t1;
--source include/wait_condition.inc
#
# The mysql.gtid_slave_pos table should not be replicated by Galera
#
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
--connection node_2
--echo # Verify that graceful shutdown succeeds.
--source include/shutdown_mysqld.inc
--echo # Force SST
--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
--connection node_1
--echo # Waiting until node_2 is not part of cluster anymore
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc
--connection node_2
--echo # Start node_2 again
--source include/start_mysqld.inc
--echo # Wait until node_2 is back on cluster
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
--source include/wait_condition.inc
--connection node_2
call mtr.add_suppression("Slave: Operation CREATE USER failed for .*");
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
--connection node_1
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
--connection node_3
SELECT COUNT(*) AS EXPECT_10000 FROM t1;
#
# Cleanup
#
--connection node_2
STOP SLAVE;
RESET SLAVE ALL;
--connection node_3
RESET MASTER;
drop table t1;
--connection node_2
DROP TABLE t1;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
--connection node_1
--disconnect node_3
--source include/auto_increment_offset_restore.inc
--source include/galera_end.inc
--echo # End of test
......@@ -1726,6 +1726,11 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
thd->variables.pseudo_thread_id= thread_id; // for temp tables
DBUG_PRINT("query",("%s", thd->query()));
#ifdef WITH_WSREP
WSREP_DEBUG("Query_log_event thread=%llu for query=%s",
thd_get_thread_id(thd), wsrep_thd_query(thd));
#endif
if (unlikely(!(expected_error= error_code)) ||
ignored_error_code(expected_error) ||
!unexpected_error_code(expected_error))
......
......@@ -697,10 +697,11 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
#ifdef WITH_WSREP
/*
We should replicate local gtid_slave_pos updates to other nodes.
We should replicate local gtid_slave_pos updates to other nodes if
wsrep gtid mode is set.
In applier we should not append them to galera writeset.
*/
if (WSREP_ON_ && wsrep_thd_is_local(thd))
if (WSREP_ON_ && wsrep_gtid_mode && wsrep_thd_is_local(thd))
{
thd->wsrep_ignore_table= false;
table->file->row_logging= 1; // replication requires binary logging
......@@ -877,10 +878,12 @@ rpl_slave_state::gtid_delete_pending(THD *thd,
#ifdef WITH_WSREP
/*
We should replicate local gtid_slave_pos updates to other nodes.
We should replicate local gtid_slave_pos updates to other nodes if
wsrep gtid mode is set.
In applier we should not append them to galera writeset.
*/
if (WSREP_ON_ && wsrep_thd_is_local(thd) &&
if (WSREP_ON_ && wsrep_gtid_mode &&
wsrep_thd_is_local(thd) &&
thd->wsrep_cs().state() != wsrep::client_state::s_none)
{
if (thd->wsrep_trx().active() == false)
......@@ -891,7 +894,8 @@ rpl_slave_state::gtid_delete_pending(THD *thd,
}
thd->wsrep_ignore_table= false;
}
thd->wsrep_ignore_table= true;
else
thd->wsrep_ignore_table= true;
#endif
thd_saved_option= thd->variables.option_bits;
......
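For readability, this is approximately how the guard in rpl_slave_state::gtid_delete_pending reads after this patch, reconstructed from the two hunks above. It is a sketch only; lines that the diff does not show are marked as elided.

#ifdef WITH_WSREP
  /*
    We should replicate local gtid_slave_pos updates to other nodes if
    wsrep gtid mode is set.
    In applier we should not append them to galera writeset.
  */
  if (WSREP_ON_ && wsrep_gtid_mode &&
      wsrep_thd_is_local(thd) &&
      thd->wsrep_cs().state() != wsrep::client_state::s_none)
  {
    if (thd->wsrep_trx().active() == false)
    {
      /* ... lines elided in the diff ... */
    }
    thd->wsrep_ignore_table= false;
  }
  else
    thd->wsrep_ignore_table= true;
#endif
  thd_saved_option= thd->variables.option_bits;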