Commit 50ef0066 authored by Sergei Golubchik

Merge branch '10.0' into bb-10.0-serg

parents 928edb5a d6d54584
......@@ -7858,3 +7858,56 @@ v1
DROP PROCEDURE p1;
DROP TABLE t1;
# End of 5.5 test
#
# MDEV-7040: Crash in field_conv, memcpy_field_possible, part#2
#
create table t1 (
col1 bigint(20),
col2 char(1),
col3 char(2)
);
insert into t1 values (1,'a','a'), (2,'b','b');
create table t2 as select * from t1;
create table t3 as select * from t1;
create table t4 as select * from t1;
create table t5 as select * from t1;
create table t6 as select * from t1;
flush tables;
CREATE PROCEDURE p1()
begin
DECLARE _var1 bigint(20) UNSIGNED;
DECLARE _var2 CHAR(1) DEFAULT NULL;
DECLARE _var3 CHAR(1) DEFAULT NULL;
DECLARE _done BOOLEAN DEFAULT 0;
declare cur1 cursor for
select col1, col2, col3
from t1
where
col1 in (select t2.col1 from t2 where t2.col2=t1.col2) or
col2 in (select t3.col3 from t3 where t3.col3=t1.col2) ;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET _done = 1;
OPEN cur1;
set _var1 = (select _var1 from t4 limit 1);
set _var1 = (select _var1 from t5 limit 1);
set _var1 = (select _var1 from t6 limit 1);
label1:
LOOP
SET _done = 0;
FETCH cur1 INTO _var1, _var2, _var3;
IF _done THEN
LEAVE label1;
END IF;
END LOOP label1;
CLOSE cur1;
end|
set @tmp_toc= @@table_open_cache;
set @tmp_tdc= @@table_definition_cache;
set global table_open_cache=1;
set global table_definition_cache=1;
Warnings:
Warning 1292 Truncated incorrect table_definition_cache value: '1'
call p1();
set global table_open_cache= @tmp_toc;
set global table_definition_cache= @tmp_tdc;
drop procedure p1;
drop table t1,t2,t3,t4,t5,t6;
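The MDEV-7040 test above forces the crash by shrinking table_open_cache and table_definition_cache to 1 and flushing tables, so the tables named in the cursor's definition query get closed while the cursor's materialized temporary table still holds Field::orig_table pointers into them. The fix, visible in the Materialized_cursor::on_table_fill_finished() hunk further down, repoints orig_table at the temporary table itself once materialization finishes. Below is a minimal illustrative sketch of that repointing, using simplified stand-in types rather than the real server structures.

struct Table;                          // stand-in for the server's TABLE
struct Field
{
  Table *table;                        // table the field belongs to (the temp table)
  Table *orig_table;                   // table the field "came from" (a cursor source table)
};
struct TableShare { unsigned fields; };
struct Table
{
  TableShare *s;
  Field **field;                       // array of s->fields field pointers
};

/*
  Sketch of the idea behind on_table_fill_finished(): after the temporary
  table is filled, the source tables of the definition query may be closed,
  so any orig_table pointer into them would dangle.  Repoint orig_table at
  the temporary table itself, as is already done for regular tables.
*/
static void on_table_fill_finished(Table *tmp_table)
{
  for (unsigned i= 0; i < tmp_table->s->fields; i++)
    tmp_table->field[i]->orig_table= tmp_table->field[i]->table;
}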
......@@ -2215,6 +2215,24 @@ Handler_read_rnd_next 6003
Handler_tmp_write 2000
Handler_write 1000
drop table t0,t1,t2,t3;
#
# MDEV-7971: Assertion `name != __null' failed in ACL_internal_schema_registry::lookup
# on 2nd execution of PS with multi-table update
#
CREATE TABLE t1 (f1 INT);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (f2 INT);
INSERT INTO t2 VALUES (3),(4);
CREATE TABLE t3 (f3 INT);
INSERT INTO t3 VALUES (5),(6);
PREPARE stmt FROM '
UPDATE t1, t2
SET f1 = 5
WHERE 8 IN ( SELECT MIN(f3) FROM t3 )
';
EXECUTE stmt;
EXECUTE stmt;
DROP TABLE t1,t2,t3;
set @subselect_mat_test_optimizer_switch_value=null;
set @@optimizer_switch='materialization=on,in_to_exists=off,semijoin=off';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
......
......@@ -2255,3 +2255,21 @@ Handler_read_rnd_next 6003
Handler_tmp_write 2000
Handler_write 1000
drop table t0,t1,t2,t3;
#
# MDEV-7971: Assertion `name != __null' failed in ACL_internal_schema_registry::lookup
# on 2nd execution of PS with multi-table update
#
CREATE TABLE t1 (f1 INT);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (f2 INT);
INSERT INTO t2 VALUES (3),(4);
CREATE TABLE t3 (f3 INT);
INSERT INTO t3 VALUES (5),(6);
PREPARE stmt FROM '
UPDATE t1, t2
SET f1 = 5
WHERE 8 IN ( SELECT MIN(f3) FROM t3 )
';
EXECUTE stmt;
EXECUTE stmt;
DROP TABLE t1,t2,t3;
......@@ -1649,6 +1649,46 @@ include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debg;
SET GLOBAL max_relay_log_size= @old_max;
include/start_slave.inc
*** MDEV-8302: Duplicate key with parallel replication ***
include/stop_slave.inc
/* Inject a small sleep which makes the race easier to hit. */
SET @old_dbug=@@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,inject_mdev8302";
INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5);
SET @old_dbug= @@SESSION.debug_dbug;
SET @commit_id= 20000;
SET SESSION debug_dbug="+d,binlog_force_commit_id";
SET SESSION debug_dbug=@old_dbug;
SELECT * FROM t7 ORDER BY a;
a b
1 1
2 2
3 86
4 4
5 5
100 5
101 1
102 2
103 3
104 4
include/save_master_gtid.inc
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT * FROM t7 ORDER BY a;
a b
1 1
2 2
3 86
4 4
5 5
100 5
101 1
102 2
103 3
104 4
include/stop_slave.inc
SET GLOBAL debug_dbug=@old_dbug;
include/start_slave.inc
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
include/start_slave.inc
......
......@@ -2314,6 +2314,60 @@ SET GLOBAL max_relay_log_size= @old_max;
--source include/start_slave.inc
--echo *** MDEV-8302: Duplicate key with parallel replication ***
--connection server_2
--source include/stop_slave.inc
/* Inject a small sleep which makes the race easier to hit. */
SET @old_dbug=@@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,inject_mdev8302";
--connection server_1
INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5);
# Artificially create a bunch of group commits with conflicting transactions.
# The bug happened when T1 and T2 were in one group commit, and T3 was in the
# following group commit. T2 is a DELETE of a row with same primary key as a
# row that T3 inserts. T1 and T2 can conflict, causing T2 to be deadlock
# killed after starting to commit. The bug was that T2 could roll back before
# doing unmark_start_commit(); this could allow T3 to run before the retry
# of T2, causing duplicate key violation.
SET @old_dbug= @@SESSION.debug_dbug;
SET @commit_id= 20000;
SET SESSION debug_dbug="+d,binlog_force_commit_id";
--let $n = 100
--disable_query_log
while ($n)
{
eval UPDATE t7 SET b=b+1 WHERE a=100+($n MOD 5);
eval DELETE FROM t7 WHERE a=100+($n MOD 5);
SET @commit_id = @commit_id + 1;
eval INSERT INTO t7 VALUES (100+($n MOD 5), $n);
SET @commit_id = @commit_id + 1;
dec $n;
}
--enable_query_log
SET SESSION debug_dbug=@old_dbug;
SELECT * FROM t7 ORDER BY a;
--source include/save_master_gtid.inc
--connection server_2
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
SELECT * FROM t7 ORDER BY a;
--source include/stop_slave.inc
SET GLOBAL debug_dbug=@old_dbug;
--source include/start_slave.inc
# Clean up.
--connection server_2
......
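The test relies on the server's debug-keyword mechanism: SET GLOBAL debug_dbug="+d,inject_mdev8302" switches on a named keyword, and the do_retry hunk below wraps a my_sleep(20000) in DBUG_EXECUTE_IF("inject_mdev8302", ...) to widen the race window right before unmark_start_commit(). The sketch below models that mechanism with simplified stand-ins; it is not the real my_dbug implementation.

#include <chrono>
#include <set>
#include <string>
#include <thread>

static std::set<std::string> active_debug_keywords;   // stand-in for the "+d,..." keyword list

/* Stand-in for DBUG_EXECUTE_IF(keyword, code): run the code block only when
   the keyword has been switched on through the debug_dbug variable. */
template <class F>
static void debug_execute_if(const std::string &keyword, F code)
{
  if (active_debug_keywords.count(keyword))
    code();
}

int main()
{
  // SET GLOBAL debug_dbug="+d,inject_mdev8302";
  active_debug_keywords.insert("inject_mdev8302");

  // In the retry path, widen the race window just before unmark_start_commit():
  debug_execute_if("inject_mdev8302", [] {
    std::this_thread::sleep_for(std::chrono::microseconds(20000));  // my_sleep(20000)
  });
  return 0;
}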
......@@ -9303,3 +9303,71 @@ DROP PROCEDURE p1;
DROP TABLE t1;
--echo # End of 5.5 test
--echo #
--echo # MDEV-7040: Crash in field_conv, memcpy_field_possible, part#2
--echo #
create table t1 (
col1 bigint(20),
col2 char(1),
col3 char(2)
);
insert into t1 values (1,'a','a'), (2,'b','b');
create table t2 as select * from t1;
create table t3 as select * from t1;
create table t4 as select * from t1;
create table t5 as select * from t1;
create table t6 as select * from t1;
flush tables;
DELIMITER |;
CREATE PROCEDURE p1()
begin
DECLARE _var1 bigint(20) UNSIGNED;
DECLARE _var2 CHAR(1) DEFAULT NULL;
DECLARE _var3 CHAR(1) DEFAULT NULL;
DECLARE _done BOOLEAN DEFAULT 0;
declare cur1 cursor for
select col1, col2, col3
from t1
where
col1 in (select t2.col1 from t2 where t2.col2=t1.col2) or
col2 in (select t3.col3 from t3 where t3.col3=t1.col2) ;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET _done = 1;
OPEN cur1;
set _var1 = (select _var1 from t4 limit 1);
set _var1 = (select _var1 from t5 limit 1);
set _var1 = (select _var1 from t6 limit 1);
label1:
LOOP
SET _done = 0;
FETCH cur1 INTO _var1, _var2, _var3;
IF _done THEN
LEAVE label1;
END IF;
END LOOP label1;
CLOSE cur1;
end|
DELIMITER ;|
set @tmp_toc= @@table_open_cache;
set @tmp_tdc= @@table_definition_cache;
set global table_open_cache=1;
set global table_definition_cache=1;
call p1();
set global table_open_cache= @tmp_toc;
set global table_definition_cache= @tmp_tdc;
drop procedure p1;
drop table t1,t2,t3,t4,t5,t6;
......@@ -1900,3 +1900,28 @@ select * from t1 where (a,b) in (select max(a),b from t2 group by b);
show status where Variable_name like 'Handler_read%' or Variable_name like 'Handler_%write%';
drop table t0,t1,t2,t3;
--echo #
--echo # MDEV-7971: Assertion `name != __null' failed in ACL_internal_schema_registry::lookup
--echo # on 2nd execution of PS with multi-table update
--echo #
CREATE TABLE t1 (f1 INT);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (f2 INT);
INSERT INTO t2 VALUES (3),(4);
CREATE TABLE t3 (f3 INT);
INSERT INTO t3 VALUES (5),(6);
PREPARE stmt FROM '
UPDATE t1, t2
SET f1 = 5
WHERE 8 IN ( SELECT MIN(f3) FROM t3 )
';
EXECUTE stmt;
EXECUTE stmt;
DROP TABLE t1,t2,t3;
......@@ -1582,6 +1582,26 @@ int ha_rollback_trans(THD *thd, bool all)
DBUG_ASSERT(thd->transaction.stmt.ha_list == NULL ||
trans == &thd->transaction.stmt);
#ifdef HAVE_REPLICATION
if (is_real_trans)
{
/*
In parallel replication, if we need to rollback during commit, we must
first inform following transactions that we are going to abort our commit
attempt. Otherwise those following transactions can run too early, and
possibly cause replication to fail. See comments in retry_event_group().
There were several bugs with this in the past that were very hard to
track down (MDEV-7458, MDEV-8302). So we assert here that a rollback never
happens while we are still marked as having started to commit; and in
release builds, we explicitly unmark before rolling back.
*/
DBUG_ASSERT(!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit));
if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit)
thd->rgi_slave->unmark_start_commit();
}
#endif
if (thd->in_sub_stmt)
{
DBUG_ASSERT(0);
......
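The comment and assertion above encode an ordering invariant of parallel replication: a transaction that has announced the start of its commit must retract that announcement before rolling back for a retry, otherwise transactions in the following group commit can be released too early (the MDEV-8302 duplicate-key failure). The sketch below illustrates the invariant with hypothetical names; it is not the real rpl_parallel API.

#include <condition_variable>
#include <mutex>

struct GroupCommitTracker
{
  std::mutex m;
  std::condition_variable cv;
  int started_commits= 0;               // transactions that called mark_start_commit()

  void mark_start_commit()
  {
    std::lock_guard<std::mutex> l(m);
    ++started_commits;
    cv.notify_all();
  }
  void unmark_start_commit()
  {
    std::lock_guard<std::mutex> l(m);
    --started_commits;
  }
  /* A transaction in the following group commit waits until all n earlier
     transactions have announced the start of their commit. */
  void wait_for_prior_commits(int n)
  {
    std::unique_lock<std::mutex> l(m);
    cv.wait(l, [&] { return started_commits >= n; });
  }
};

/*
  Correct ordering on a deadlock kill during commit, matching the intent of
  the ha_rollback_trans() hunk above: retract the announcement first, then
  roll back and retry the event group.
*/
static void rollback_for_retry(GroupCommitTracker &gco, bool did_mark_start_commit)
{
  if (did_mark_start_commit)
    gco.unmark_start_commit();          // keep the next group commit from running too early
  /* ... actual rollback and retry of the event group would follow here ... */
}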
......@@ -4277,7 +4277,6 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
"mysql", rpl_gtid_slave_state_table_name.str,
errcode,
thd->get_stmt_da()->message());
trans_rollback(thd);
sub_id= 0;
thd->is_slave_error= 1;
goto end;
......@@ -7367,7 +7366,6 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
"%s.%s: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str, ec,
thd->get_stmt_da()->message());
trans_rollback(thd);
thd->is_slave_error= 1;
return err;
}
......
......@@ -226,6 +226,11 @@ static void
signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err)
{
rgi->worker_error= err;
/*
In case we get an error during commit, inform following transactions that
we aborted our commit.
*/
rgi->unmark_start_commit();
rgi->cleanup_context(thd, true);
rgi->rli->abort_slave= true;
rgi->rli->stop_for_until= false;
......@@ -370,6 +375,7 @@ do_retry:
transaction we deadlocked with will not signal that it started to commit
until after the unmark.
*/
DBUG_EXECUTE_IF("inject_mdev8302", { my_sleep(20000);});
rgi->unmark_start_commit();
DEBUG_SYNC(thd, "rpl_parallel_retry_after_unmark");
......@@ -883,11 +889,26 @@ handle_rpl_parallel_thread(void *arg)
group_rgi= rgi;
group_ending= is_group_ending(qev->ev, event_type);
if (group_ending && likely(!rgi->worker_error))
{
/*
Do an extra check for (deadlock) kill here. This helps prevent a
lingering deadlock kill that occurred during normal DML processing from
propagating past the mark_start_commit(). If we detect a deadlock only
after mark_start_commit(), we have to unmark, which has at least a
theoretical possibility of leaving a window where it looks like all
transactions in a GCO have started committing, while in fact one
will need to roll back and retry. This is not supposed to be possible
(since there is a deadlock, at least one transaction should be
blocked from reaching commit), but this seems a fragile guarantee,
and there were historically a number of subtle bugs in this area.
*/
if (!thd->killed)
{
DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
rgi->mark_start_commit();
DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit");
}
}
/*
If the SQL thread is stopping, we just skip execution of all the
......@@ -911,7 +932,17 @@ handle_rpl_parallel_thread(void *arg)
});
if (!err)
#endif
{
if (thd->check_killed())
{
thd->clear_error();
thd->get_stmt_da()->reset_diagnostics_area();
thd->send_kill_message();
err= 1;
}
else
err= rpt_handle_event(qev, rpt);
}
delete_or_keep_event_post_apply(rgi, event_type, qev->ev);
DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100",
err= dbug_simulate_tmp_error(rgi, thd););
......
......@@ -54,6 +54,8 @@ public:
virtual void fetch(ulong num_rows);
virtual void close();
virtual ~Materialized_cursor();
void on_table_fill_finished();
};
......@@ -74,6 +76,18 @@ public:
Select_materialize(select_result *result_arg)
:result(result_arg), materialized_cursor(0) {}
virtual bool send_result_set_metadata(List<Item> &list, uint flags);
bool send_eof()
{
if (materialized_cursor)
materialized_cursor->on_table_fill_finished();
return false;
}
void abort_result_set()
{
if (materialized_cursor)
materialized_cursor->on_table_fill_finished();
}
};
......@@ -388,6 +402,29 @@ Materialized_cursor::~Materialized_cursor()
}
/*
@brief
Perform actions that are to be done when cursor materialization has
finished.
@detail
This function is called when "OPEN $cursor" has finished filling the
temporary table with rows that the cursor will return.
The temporary table has table->field->orig_table pointing at the tables
that are used in the cursor definition query. Pointers to these tables
will not be valid after the query finishes. So, we do what is done for
regular tables: have orig_table point at the table that the fields belong
to.
*/
void Materialized_cursor::on_table_fill_finished()
{
uint fields= table->s->fields;
for (uint i= 0; i < fields; i++)
table->field[i]->orig_table= table->field[i]->table;
}
/***************************************************************************
Select_materialize
****************************************************************************/
......
......@@ -7751,6 +7751,8 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
*/
for (table= tables; table; table= table->next_local)
{
if (table->is_jtbm())
continue;
if (table->derived)
table->grant.privilege= SELECT_ACL;
else if ((check_access(thd, UPDATE_ACL, table->db,
......
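In MDEV-7971, the IN-subquery of the multi-table UPDATE is turned into a materialized (jtbm) semi-join nest on the second execution of the prepared statement. Such a nest is an optimizer-internal pseudo-table without a schema or table name, so letting it reach the privilege checks can end up handing a null name to ACL_internal_schema_registry::lookup(), which is the assertion in the bug title. The hunk above therefore skips is_jtbm() entries in multi_update_precheck(), and the unsafe_key_update(), mysql_multi_update_prepare() and multi_update::prepare() hunks below do the same in the other per-table loops. A minimal sketch of that guard, using simplified stand-in types rather than the real TABLE_LIST and ACL interfaces:

#include <cassert>
#include <vector>

struct TableRef
{
  const char *db;                       // NULL for a jtbm pseudo-table
  const char *table_name;               // NULL for a jtbm pseudo-table
  bool jtbm;                            // materialized IN-subquery nest
  bool is_jtbm() const { return jtbm; }
};

/* Stand-in for the ACL check; the real code would consult the grant tables. */
static bool check_update_access(const TableRef &t)
{
  assert(t.db && t.table_name);         // the crash: a NULL name reaching the ACL lookup
  return true;
}

static bool multi_update_precheck(const std::vector<TableRef> &tables)
{
  for (const TableRef &t : tables)
  {
    if (t.is_jtbm())
      continue;                         // optimizer-created nest: never updated, nothing to check
    if (!check_update_access(t))
      return true;                      /* error */
  }
  return false;                         /* success (TRUE means error, as in the server) */
}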
......@@ -1161,7 +1161,7 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
while ((tl= it++))
{
if (tl->table->map & tables_for_update)
if (!tl->is_jtbm() && (tl->table->map & tables_for_update))
{
TABLE *table1= tl->table;
bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
......@@ -1178,6 +1178,8 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
it2.rewind();
while ((tl2= it2++))
{
if (tl2->is_jtbm())
continue;
/*
Look at "next" tables only since all previous tables have
already been checked
......@@ -1409,6 +1411,9 @@ int mysql_multi_update_prepare(THD *thd)
{
TABLE *table= tl->table;
if (tl->is_jtbm())
continue;
/* if table will be updated then check that it is unique */
if (table->map & tables_for_update)
{
......@@ -1457,6 +1462,8 @@ int mysql_multi_update_prepare(THD *thd)
for (tl= table_list; tl; tl= tl->next_local)
{
bool not_used= false;
if (tl->is_jtbm())
continue;
if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
DBUG_RETURN(TRUE);
}
......@@ -1464,6 +1471,8 @@ int mysql_multi_update_prepare(THD *thd)
/* check single table update for view compound from several tables */
for (tl= table_list; tl; tl= tl->next_local)
{
if (tl->is_jtbm())
continue;
if (tl->is_merged_derived())
{
TABLE_LIST *for_update= 0;
......@@ -1493,6 +1502,8 @@ int mysql_multi_update_prepare(THD *thd)
ti.rewind();
while ((tl= ti++))
{
if (tl->is_jtbm())
continue;
TABLE *table= tl->table;
TABLE_LIST *tlist;
if (!(tlist= tl->top_table())->derived)
......@@ -1635,6 +1646,9 @@ int multi_update::prepare(List<Item> &not_used_values,
*/
while ((table_ref= ti++))
{
if (table_ref->is_jtbm())
continue;
TABLE *table= table_ref->table;
if (tables_to_update & table->map)
{
......@@ -1654,6 +1668,9 @@ int multi_update::prepare(List<Item> &not_used_values,
ti.rewind();
while ((table_ref= ti++))
{
if (table_ref->is_jtbm())
continue;
TABLE *table= table_ref->table;
if (tables_to_update & table->map)
{
......@@ -1684,6 +1701,8 @@ int multi_update::prepare(List<Item> &not_used_values,
while ((table_ref= ti++))
{
/* TODO: add support for views over joins */
if (table_ref->is_jtbm())
continue;
TABLE *table=table_ref->table;
leaf_table_count++;
if (tables_to_update & table->map)
......
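A note on the design choice in the sql_update.cc hunks above: the patch adds the explicit "if (tl->is_jtbm()) continue;" guard to each loop that walks the update's table list, rather than filtering the jtbm nests out of the list once, which keeps the shared leaf list intact for the rest of the optimizer. A hypothetical alternative, shown only for contrast and not part of the patch, would centralize the skip in one filtering helper:

#include <vector>

struct TableRef
{
  bool jtbm;                            // materialized IN-subquery nest
  bool is_jtbm() const { return jtbm; }
};

/* Hypothetical helper: walk only the real tables of the update, hiding the
   jtbm skip in one place instead of repeating it in every loop. */
template <class F>
static void for_each_real_table(const std::vector<TableRef*> &leaves, F fn)
{
  for (TableRef *tl : leaves)
    if (!tl->is_jtbm())
      fn(tl);
}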