Commit 467c7b2b authored by Marko Mäkelä's avatar Marko Mäkelä

Merge 10.7 into 10.8

parents e384299e 182bf9b3
@@ -11,8 +11,8 @@
sharedscripts
postrotate
  test -x /usr/bin/mysqladmin || exit 0
-  if [ -f `my_print_defaults --mysqld | grep -oP "pid-file=\K[^$]+"` ]; then
-    # If this fails, check debian.conf!
+  # check if server is running
+  if mysqladmin ping > /dev/null 2>&1; then
    mysqladmin --defaults-file=/etc/mysql/debian.cnf --local flush-error-log \
      flush-engine-log flush-general-log flush-slow-log
  fi
......
@@ -3,7 +3,6 @@
#table containing single unique column
#table containing keys like unique(a,b,c,d) etc
#then table containing 2 blob unique etc
-set @allowed_packet= @@max_allowed_packet;
#table with single long blob column;
create table t1(a blob unique );
insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
@@ -1226,44 +1225,6 @@ DB_ROW_HASH_1
33
44
drop table t1,t2;
#very long blob entry;
SET @@GLOBAL.max_allowed_packet=67108864;
connect 'newcon', localhost, root,,;
connection newcon;
show variables like 'max_allowed_packet';
Variable_name Value
max_allowed_packet 67108864
create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
desc t1;
Field Type Null Key Default Extra
a longblob YES UNI NULL
b longblob YES MUL NULL
c longblob YES NULL
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` longblob DEFAULT NULL,
`b` longblob DEFAULT NULL,
`c` longblob DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH,
UNIQUE KEY `b` (`b`,`c`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
t1 0 a 1 a A NULL NULL NULL YES HASH NO
t1 0 b 1 b A NULL NULL NULL YES HASH NO
t1 0 b 2 c A NULL NULL NULL YES HASH NO
insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'4'));
ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachins...' for key 'a'
insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachins...' for key 'b'
drop table t1;
#long key unique with different key length
create table t1(a blob, unique(a(3000)));
desc t1;
@@ -1308,9 +1269,6 @@ t1 0 a 2 b A NULL NULL NULL YES HASH NO
t1 0 c 1 c A NULL 4500 NULL YES HASH NO
t1 0 c 2 d A NULL NULL NULL YES HASH NO
drop table t1;
disconnect newcon;
connection default;
SET @@GLOBAL.max_allowed_packet=4194304;
#ext bug
create table t1(a int primary key, b blob unique, c int, d blob , index(c));
show create table t1;
@@ -1478,6 +1436,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
b
drop table t1,t2;
+# End of 10.4 tests
#
# MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
#
@@ -1493,5 +1452,4 @@ DROP TABLE t1, t2;
#
CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
ERROR HY000: AUTO_INCREMENT column `b` cannot be used in the UNIQUE index `a`
-set @@GLOBAL.max_allowed_packet= @allowed_packet;
-# End of 10.4 tests
+# End of 10.5 tests
@@ -10,7 +10,7 @@ let datadir=`select @@datadir`;
--echo #table containing single unique column
--echo #table containing keys like unique(a,b,c,d) etc
--echo #then table containing 2 blob unique etc
-set @allowed_packet= @@max_allowed_packet;
--echo #table with single long blob column;
create table t1(a blob unique );
insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
@@ -396,28 +396,6 @@ select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
drop table t1,t2;
--echo #very long blob entry;
SET @@GLOBAL.max_allowed_packet=67108864;
connect ('newcon', localhost, root,,);
--connection newcon
show variables like 'max_allowed_packet';
create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
desc t1;
show create table t1;
show keys from t1;
insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'1'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'4'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
drop table t1;
--echo #long key unique with different key length
create table t1(a blob, unique(a(3000)));
desc t1;
@@ -435,9 +413,7 @@ desc t1;
show create table t1;
show keys from t1;
drop table t1;
disconnect newcon;
--connection default
SET @@GLOBAL.max_allowed_packet=4194304;
--echo #ext bug
create table t1(a int primary key, b blob unique, c int, d blob , index(c));
show create table t1;
@@ -556,6 +532,8 @@ SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
drop table t1,t2;
+--echo # End of 10.4 tests
--echo #
--echo # MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
--echo #
@@ -573,6 +551,4 @@ DROP TABLE t1, t2;
--error ER_NO_AUTOINCREMENT_WITH_UNIQUE
CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
-# Cleanup
-set @@GLOBAL.max_allowed_packet= @allowed_packet;
---echo # End of 10.4 tests
+--echo # End of 10.5 tests
set @allowed_packet= @@max_allowed_packet;
SET GLOBAL max_allowed_packet=67108864;
connect con1, localhost, root,,;
create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
desc t1;
Field Type Null Key Default Extra
a longblob YES UNI NULL
b longblob YES MUL NULL
c longblob YES NULL
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` longblob DEFAULT NULL,
`b` longblob DEFAULT NULL,
`c` longblob DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH,
UNIQUE KEY `b` (`b`,`c`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
t1 0 a 1 a A NULL NULL NULL YES HASH NO
t1 0 b 1 b A NULL NULL NULL YES HASH NO
t1 0 b 2 c A NULL NULL NULL YES HASH NO
insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'4'));
ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachins...' for key 'a'
insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachins...' for key 'b'
drop table t1;
disconnect con1;
connection default;
set @@GLOBAL.max_allowed_packet= @allowed_packet;
# End of 10.4 tests
# This test may run out of memory in some environments.
--source include/big_test.inc
set @allowed_packet= @@max_allowed_packet;
SET GLOBAL max_allowed_packet=67108864;
connect (con1, localhost, root,,);
create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
desc t1;
show create table t1;
show keys from t1;
insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'1'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'4'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
drop table t1;
disconnect con1;
connection default;
set @@GLOBAL.max_allowed_packet= @allowed_packet;
--echo # End of 10.4 tests
@@ -45,3 +45,5 @@ partition : MDEV-19958 Galera test failure on galera.partition
pxc-421: wsrep_provider is read-only for security reasons
query_cache: MDEV-15805 Test failure on galera.query_cache
versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
+galera_ssl_mode_server : Certificate CA mismatch
+galera_bf_abort_at_after_statement : Unstable
@@ -60,44 +60,6 @@ f1 f2
2 b
3 c
SET DEBUG_SYNC = "RESET";
#
# test phase with real abort
#
connection node_3;
set binlog_format=ROW;
insert into t1 values (4, 'd');
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE t1 SET f2 = 'd' WHERE f1 = 3;
connection node_2a;
SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
connection node_1;
UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3;
connection node_3;
COMMIT;
connection node_2a;
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
connection node_2a;
SET GLOBAL wsrep_provider_options = 'dbug=';
SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
SET DEBUG_SYNC = "RESET";
connection node_2a;
set session wsrep_sync_wait=0;
SELECT * from test.t1;
f1 f2
1 a
2 b
3 e
4 d
connection node_1;
SELECT * from test.t1;
f1 f2
1 a
2 b
3 e
4 d
connection node_2a;
STOP SLAVE;
RESET SLAVE;
@@ -105,3 +67,6 @@ DROP TABLE t1;
connection node_3;
DROP TABLE t1;
RESET MASTER;
+connection node_1;
+disconnect node_2a;
+disconnect node_3;
@@ -4,6 +4,7 @@ connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY, val INT);
INSERT INTO t1 VALUES (1, 1);
connection node_2;
+SET DEBUG_SYNC = 'RESET';
START TRANSACTION;
SET DEBUG_SYNC = 'wsrep_after_statement_enter SIGNAL blocked WAIT_FOR continue';
UPDATE t1 SET val=2 WHERE id=1;
@@ -18,4 +19,5 @@ ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
COMMIT;
SET DEBUG_SYNC = 'RESET';
connection node_1;
+SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;
connection node_2;
connection node_1;
connection node_2;
+SELECT @@wsrep_slave_threads;
+@@wsrep_slave_threads
+1
SET GLOBAL wsrep_slave_threads=2;
Got one of the listed errors
Got one of the listed errors
Got one of the listed errors
Got one of the listed errors
-SET GLOBAL wsrep_slave_threads=1;
+SET GLOBAL wsrep_slave_threads=DEFAULT;
connection node_1;
create table t1(a int not null primary key) engine=innodb;
insert into t1 values (1);
insert into t1 values (2);
connection node_2;
-set global wsrep_sync_wait=15;
+# Wait until one of the appliers has exited
select count(*) from t1;
count(*)
2
+SELECT @@wsrep_slave_threads;
+@@wsrep_slave_threads
+1
connection node_1;
drop table t1;
connection node_2;
connection node_1;
+connection node_1;
CREATE TABLE t1 (
f1 VARCHAR(255) PRIMARY KEY
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
......
connection node_2;
connection node_1;
connection node_1;
-CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
+CREATE TABLE ten (f1 INTEGER not null primary key) Engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
+CREATE TABLE t1 (f1 INTEGER NOT NULL PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 SELECT f1 FROM ten ORDER BY RAND();
connection node_2;
UPDATE IGNORE t1 SET f1 = FLOOR(1 + (RAND() * 10)) ORDER BY RAND() LIMIT 5;
......
@@ -5,7 +5,9 @@ binlog-format=row
[mysqld.1]
wsrep_restart_slave=1
+wsrep-debug=1
[mysqld.2]
wsrep_restart_slave=1
+wsrep-debug=1
@@ -131,70 +131,6 @@ set session wsrep_sync_wait=0;
SELECT * FROM t1;
SET DEBUG_SYNC = "RESET";
#********************************************************************************
# test phase 2
#********************************************************************************
--echo #
--echo # test phase with real abort
--echo #
--connection node_3
set binlog_format=ROW;
insert into t1 values (4, 'd');
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE t1 SET f2 = 'd' WHERE f1 = 3;
--connection node_2a
# wait for the last insert to be replicated from master
--let $wait_condition = SELECT COUNT(*) = 4 FROM test.t1;
--source include/wait_condition.inc
# Block the commit
--let $galera_sync_point = commit_monitor_enter_sync
--source include/galera_set_sync_point.inc
# block applier
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
# Inject a conflicting update from node 3
--connection node_1
UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3;
# send the update from master
--connection node_3
--error 0
COMMIT;
--connection node_2a
# release the applier
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
# Unblock the async slave commit
--connection node_2a
--source include/galera_clear_sync_point.inc
--source include/galera_signal_sync_point.inc
SET DEBUG_SYNC = "RESET";
--connection node_2a
set session wsrep_sync_wait=0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e'
--source include/wait_condition.inc
SELECT * from test.t1;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e'
--source include/wait_condition.inc
SELECT * from test.t1;
--connection node_2a
STOP SLAVE;
RESET SLAVE;
@@ -204,3 +140,7 @@ DROP TABLE t1;
--connection node_3
DROP TABLE t1;
RESET MASTER;
+--connection node_1
+--disconnect node_2a
+--disconnect node_3
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
@@ -8,10 +8,21 @@
--source include/have_debug_sync.inc
--connection node_1
+# Make sure both nodes are in the cluster
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
CREATE TABLE t1 (id INT PRIMARY KEY, val INT);
INSERT INTO t1 VALUES (1, 1);
--connection node_2
+SET DEBUG_SYNC = 'RESET';
+# Make sure that the DDL and the insert have replicated
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1
+--source include/wait_condition.inc
START TRANSACTION;
SET DEBUG_SYNC = 'wsrep_after_statement_enter SIGNAL blocked WAIT_FOR continue';
--send UPDATE t1 SET val=2 WHERE id=1
@@ -34,4 +45,5 @@ COMMIT;
SET DEBUG_SYNC = 'RESET';
--connection node_1
+SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
@@ -7,9 +7,10 @@
--source include/force_restart.inc
--connection node_2
+SELECT @@wsrep_slave_threads;
SET GLOBAL wsrep_slave_threads=2;
---let $wait_condition = SELECT COUNT(*) >= 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle';
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle';
--let $wait_condition_on_error_output = SELECT COUNT(*), 2 as EXPECTED_VALUE FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle'; show processlist
--source include/wait_condition_with_debug.inc
@@ -31,7 +32,7 @@ SET GLOBAL wsrep_slave_threads=2;
--eval KILL QUERY $aborter_thread
--enable_query_log
-SET GLOBAL wsrep_slave_threads=1;
+SET GLOBAL wsrep_slave_threads=DEFAULT;
--connection node_1
create table t1(a int not null primary key) engine=innodb;
@@ -39,8 +40,13 @@ insert into t1 values (1);
insert into t1 values (2);
--connection node_2
-set global wsrep_sync_wait=15;
+--echo # Wait until one of the appliers has exited
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
+--let $wait_condition_on_error_output = SELECT COUNT(*), 1 as EXPECTED_VALUE FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle'; show processlist
+--source include/wait_condition_with_debug.inc
select count(*) from t1;
+SELECT @@wsrep_slave_threads;
--connection node_1
drop table t1;
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
@@ -5,6 +5,11 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
+--connection node_1
+# Make sure both nodes are in the cluster
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
CREATE TABLE t1 (
f1 VARCHAR(255) PRIMARY KEY
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
@@ -12,7 +17,12 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES ('текст');
--connection node_2
---source include/wait_until_ready.inc
+# Make sure that the DDL and the insert have replicated
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1
+--source include/wait_condition.inc
SELECT f1 = 'текст' FROM t1;
#
......
!include ../galera_2nodes.cnf
[mysqld.1]
wsrep-debug=1
[mysqld.2]
wsrep-debug=1
@@ -4,17 +4,16 @@
#
--source include/galera_cluster.inc
---source include/have_innodb.inc
#
# With a PK
#
--connection node_1
-CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
+CREATE TABLE ten (f1 INTEGER not null primary key) Engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
+CREATE TABLE t1 (f1 INTEGER NOT NULL PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 SELECT f1 FROM ten ORDER BY RAND();
--connection node_2
......
@@ -250,8 +250,9 @@ static int ptr_cmp(void **a, void **b)
#define add_to_purgatory(PINS, ADDR) \
  do \
  { \
-    *(void **)((char *)(ADDR)+(PINS)->pinbox->free_ptr_offset)= \
-      (PINS)->purgatory; \
+    my_atomic_storeptr_explicit( \
+      (void **)((char *)(ADDR)+(PINS)->pinbox->free_ptr_offset), \
+      (PINS)->purgatory, MY_MEMORY_ORDER_RELEASE); \
    (PINS)->purgatory= (ADDR); \
    (PINS)->purgatory_count++; \
  } while (0)
......
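Note on the hunk above: the plain pointer store that links a freed node into the pin owner's purgatory list becomes an explicit release store (my_atomic_storeptr_explicit with MY_MEMORY_ORDER_RELEASE). A minimal sketch of the same publish-with-release pattern, written with standard C++ atomics and an invented node layout rather than the real pinbox structures:

  #include <atomic>

  struct Node
  {
    std::atomic<Node*> free_link{nullptr};  // stands in for the pointer at free_ptr_offset
  };

  struct Pins
  {
    Node* purgatory= nullptr;
    int   purgatory_count= 0;
  };

  // Link a freed node at the head of the purgatory list.  The release store
  // ensures that a thread which later observes this node through an acquire
  // load also observes everything written to the list before the store.
  static void add_to_purgatory(Pins* pins, Node* addr)
  {
    addr->free_link.store(pins->purgatory, std::memory_order_release);
    pins->purgatory= addr;
    pins->purgatory_count++;
  }

The release presumably pairs with the acquire loads added to l_find() in the next file.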
@@ -103,8 +103,9 @@ static int l_find(LF_SLIST **head, CHARSET_INFO *cs, uint32 hashnr,
  do { /* PTR() isn't necessary below, head is a dummy node */
    cursor->curr= my_assume_aligned<sizeof(LF_SLIST *)>((LF_SLIST *)(*cursor->prev));
    lf_pin(pins, 1, cursor->curr);
-  } while (my_atomic_loadptr((void **)my_assume_aligned<sizeof(LF_SLIST *)>(cursor->prev)) != cursor->curr &&
-           LF_BACKOFF());
+  } while (my_atomic_loadptr(
+             (void **)my_assume_aligned<sizeof(LF_SLIST *)>(cursor->prev))
+           != cursor->curr && LF_BACKOFF());
  for (;;)
  {
    if (unlikely(!cursor->curr))
@@ -114,14 +115,17 @@ static int l_find(LF_SLIST **head, CHARSET_INFO *cs, uint32 hashnr,
    cur_keylen= cursor->curr->keylen;
    /* The key element needs to be aligned, not necessary what it points to */
    my_assume_aligned<sizeof(const uchar *)>(&cursor->curr->key);
-    cur_key= cursor->curr->key;
+    cur_key= (const uchar *) my_atomic_loadptr_explicit((void **) &cursor->curr->key,
+                                                        MY_MEMORY_ORDER_ACQUIRE);
    do {
      /* attempting to my_assume_aligned onlink below broke the implementation */
-      link= cursor->curr->link;
+      link= (intptr) my_atomic_loadptr_explicit((void **) &cursor->curr->link,
+                                                MY_MEMORY_ORDER_RELAXED);
      cursor->next= my_assume_aligned<sizeof(LF_SLIST *)>(PTR(link));
      lf_pin(pins, 0, cursor->next);
-    } while (link != cursor->curr->link && LF_BACKOFF());
+    } while (link != (intptr) my_atomic_loadptr((void *volatile *) &cursor->curr->link)
+             && LF_BACKOFF());
    if (!DELETED(link))
    {
......
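Note on the hunk above: the traversal steps in l_find() now go through explicit atomic loads, but the discipline is unchanged: load a pointer, pin it, then re-load and retry until the two reads agree, so the pinned node cannot have been freed underneath the cursor. A self-contained sketch of that load-pin-revalidate step with standard C++ atomics (pin_next and pin_slot are invented names; the real code also uses LF_BACKOFF() and my_assume_aligned):

  #include <atomic>

  struct Slist
  {
    std::atomic<Slist*> link{nullptr};
  };

  // One traversal step: read *prev, publish the result as a hazard pointer,
  // then re-check *prev.  If the link changed between the load and the pin,
  // the node may already be gone, so retry until both reads return the same
  // pointer; only then is it safe to dereference.
  static Slist* pin_next(std::atomic<Slist*>& prev, std::atomic<Slist*>& pin_slot)
  {
    Slist* curr;
    do
    {
      curr= prev.load(std::memory_order_acquire);
      pin_slot.store(curr, std::memory_order_seq_cst);  // the lf_pin() step
    }
    while (prev.load(std::memory_order_acquire) != curr);
    return curr;
  }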
@@ -206,7 +206,7 @@ enum mrec_type_t
  /** Zero-initialize a page. The current byte offset (for subsequent
  records) will be reset to FIL_PAGE_TYPE. */
  INIT_PAGE= 0x10,
-  /** Insert a record into a page. FIXME: implement this! */
+  /** Extended record; @see mrec_ext_t */
  EXTENDED= 0x20,
  /** Write a string of bytes. Followed by the byte offset (unsigned,
  relative to the current byte offset, encoded in 1 to 3 bytes) and
......
@@ -113,9 +113,10 @@ struct page_zip_des_t
#endif /* UNIV_DEBUG */
  void clear() {
-    memset((void*) this, 0, sizeof(data) + sizeof(uint32_t));
-    ut_d(m_start = 0);
-    ut_d(m_external = false);
+    /* Clear everything except the member "fix". */
+    memset((void*) this, 0,
+           reinterpret_cast<char*>(&fix)
+           - reinterpret_cast<char*>(this));
  }
private:
......
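Note on the hunk above: clear() now zeroes every byte that precedes the member fix instead of a hard-coded sizeof(data) + sizeof(uint32_t), so the fix field is preserved; the removed ut_d() assignments are presumably covered by the wider memset. A minimal sketch of the same idea with a hypothetical standard-layout struct (only the member name fix is taken from the real page_zip_des_t; the rest is invented):

  #include <cstring>
  #include <cstdint>

  struct zip_desc
  {
    void*    data;    // compressed page frame
    uint16_t m_end;   // stands in for the fields that precede "fix"
    uint16_t ssize;
    uint32_t fix;     // must survive clear()

    void clear()
    {
      // Zero everything from the start of the object up to, but not
      // including, "fix".  Valid for a standard-layout type.
      std::memset(static_cast<void*>(this), 0,
                  reinterpret_cast<char*>(&fix) - reinterpret_cast<char*>(this));
    }
  };

For a standard-layout type, offsetof(zip_desc, fix) would give the same length.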
@@ -613,9 +613,7 @@ log_write_buf(
    log_block_store_checksum(buf + i * OS_FILE_LOG_BLOCK_SIZE);
  }
-  ut_a((next_offset >> srv_page_size_shift) <= ULINT_MAX);
-  log_sys.log.write(static_cast<size_t>(next_offset), {buf, write_len});
+  log_sys.log.write(next_offset, {buf, write_len});
  if (write_len < len) {
    start_lsn += write_len;
......
@@ -1457,8 +1457,7 @@ dberr_t srv_start(bool create_new_db)
  }
  /* fall through */
case SRV_OPERATION_RESTORE:
-  /* This must precede
-  recv_apply_hashed_log_recs(true). */
+  /* This must precede recv_sys.apply(true). */
  srv_undo_tablespaces_active
    = trx_rseg_get_n_undo_tablespaces();
  err = srv_validate_undo_tablespaces();
......