Commit b2be2e39 authored by Sergei Golubchik

don't crash if ALTER TABLE fails and a long transaction blocks MDL upgrade
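
Before this change, when the copy stage of an online ALTER failed (e.g. with
ER_DUP_ENTRY), copy_data_between_tables() tried to upgrade its MDL lock to
MDL_SHARED_NO_WRITE as a barrier before freeing from->s->online_alter_binlog.
A long concurrent transaction could block that upgrade until lock_wait_timeout;
the failure was ignored and the binlog was freed anyway, so a concurrent INSERT
committing afterwards hit DBUG_ASSERT(binlog) in binlog_commit. Now the binlog
is allocated on the share's own mem_root, the failing ALTER only marks the
share as flushed, and the actual cleanup is deferred to TABLE_SHARE::destroy().

In outline, the scenario the new test exercises (a condensed sketch of the
test below; the real test serializes these steps with debug_sync across two
connections):

# default connection: the copy stage fails on the duplicate value
create table t1 (a int);
insert t1 values (5), (5);
set session lock_wait_timeout=1;
alter table t1 add unique (a), algorithm=copy, lock=none;  # ER_DUP_ENTRY
# con2, concurrently: a transaction that outlives the failing ALTER
start transaction;
insert into t1 values (123), (456), (789);
commit;  # must not crash; the binlog now lives until the share is destroyed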

parent 332f41aa
@@ -22,6 +22,36 @@ a b
123 NULL
456 NULL
789 NULL
# Insert, error
create or replace table t1 (a int);
insert t1 values (5), (5);
connection con2;
set debug_sync= 'now WAIT_FOR ended';
connection default;
set session lock_wait_timeout=1;
set debug_sync= 'alter_table_copy_end SIGNAL ended WAIT_FOR end';
alter table t1 add unique (a), algorithm= copy, lock= none;
connection con2;
start transaction;
insert into t1 values (123), (456), (789);
set debug_sync= 'now SIGNAL end';
connection default;
ERROR 23000: Duplicate entry '5' for key 'a'
connection con2;
commit;
connection default;
select variable_value into @otd from information_schema.session_status where variable_name='Opened_table_definitions';
select * from t1;
a
5
5
123
456
789
select variable_value-@otd from information_schema.session_status where variable_name='Opened_table_definitions';
variable_value-@otd
1
set session lock_wait_timeout=default;
# long transaction and add column
create or replace table t1 (a int);
insert t1 values (5);
......
@@ -34,6 +34,38 @@ set debug_sync= 'now SIGNAL end';
--reap
select * from t1;
--echo # Insert, error
create or replace table t1 (a int);
insert t1 values (5), (5);
--connection con2
--send
set debug_sync= 'now WAIT_FOR ended';
--connection default
set session lock_wait_timeout=1;
set debug_sync= 'alter_table_copy_end SIGNAL ended WAIT_FOR end';
--send
alter table t1 add unique (a), algorithm= copy, lock= none;
--connection con2
--reap
start transaction;
insert into t1 values (123), (456), (789);
set debug_sync= 'now SIGNAL end';
--connection default
--error ER_DUP_ENTRY
--reap
--connection con2
commit;
--connection default
select variable_value into @otd from information_schema.session_status where variable_name='Opened_table_definitions';
select * from t1;
select variable_value-@otd from information_schema.session_status where variable_name='Opened_table_definitions';
set session lock_wait_timeout=default;
--echo # long transaction and add column
create or replace table t1 (a int);
insert t1 values (5);
......
@@ -398,9 +398,10 @@ qq
*a *a*a *
*a *a*a *
*a *a*a *
flush tables;
explain select * from t1 where v='a';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const 10 Using where
1 SIMPLE t1 ref v v 13 const 9 Using where
select v,count(*) from t1 group by v limit 10;
v count(*)
a 1
......
@@ -277,6 +277,7 @@ explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a '
--error ER_DUP_ENTRY
alter table t1 add unique(v);
select concat('*',v,'*',c,'*',t,'*') as qq from t1 where v='a' order by length(concat('*',v,'*',c,'*',t,'*'));
flush tables;
explain select * from t1 where v='a';
# GROUP BY
......
@@ -11592,17 +11592,7 @@ static int online_alter_read_from_binlog(THD *thd, rpl_group_info *rgi,
  return error;
}
#endif // HAVE_REPLICATION
static void online_alter_cleanup_binlog(THD *thd, TABLE_SHARE *s)
{
#ifdef HAVE_REPLICATION
  if (!s->online_alter_binlog)
    return;
  s->online_alter_binlog->cleanup();
  s->online_alter_binlog= NULL;
#endif
}
static int
copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
@@ -11805,7 +11795,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
#ifdef HAVE_REPLICATION
  if (online)
  {
    from->s->online_alter_binlog= new (thd->mem_root) Cache_flip_event_log();
    from->s->online_alter_binlog= new (&from->s->mem_root) Cache_flip_event_log();
    if (!from->s->online_alter_binlog)
      DBUG_RETURN(1);
    from->s->online_alter_binlog->init_pthread_objects();
@@ -11813,7 +11803,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
    if (error)
    {
      online_alter_cleanup_binlog(thd, from->s);
      from->s->online_alter_binlog->cleanup();
      from->s->online_alter_binlog= NULL;
      goto err;
    }
@@ -12023,21 +12014,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
  }
  else if (online) // error was on copy stage
  {
    /*
      We need to issue a barrier to clean up gracefully.
      Without this, the following is possible:
      T1: ALTER TABLE starts
      T2: INSERT starts
      T1: ALTER TABLE fails with an error (e.g. ER_DUP_KEY)
      T1: from->s->online_alter_binlog is set to NULL
      T2: INSERT commits
      T2: thd->online_alter_cache_list is not empty
      T2: binlog_commit: DBUG_ASSERT(binlog); is issued.
    */
    // Ignore the return value. We already have an error.
    thd->mdl_context.upgrade_shared_lock(from->mdl_ticket,
                                         MDL_SHARED_NO_WRITE,
                                         thd->variables.lock_wait_timeout);
    from->s->tdc->flushed= 1; // to free the binlog
  }
#endif
@@ -12058,9 +12035,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
  if (bulk_insert_started)
    (void) to->file->ha_end_bulk_insert();
  /* Free resources */
  online_alter_cleanup_binlog(thd, from->s);
  if (init_read_record_done)
    end_read_record(&info);
  delete [] copy;
......
@@ -505,6 +505,14 @@ void TABLE_SHARE::destroy()
    }
  }
#ifdef HAVE_REPLICATION
  if (online_alter_binlog)
  {
    online_alter_binlog->cleanup();
    online_alter_binlog= NULL;
  }
#endif
#ifdef WITH_PARTITION_STORAGE_ENGINE
  plugin_unlock(NULL, default_part_plugin);
#endif /* WITH_PARTITION_STORAGE_ENGINE */
......