Commit c76072db authored by Nikita Malyavin, committed by Sergei Golubchik

MDEV-31033 ER_KEY_NOT_FOUND upon online COPY ALTER on a partitioned table

The row events were applied twice: once for ha_partition, and once more
for the underlying storage engine's handler.
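
For illustration, a minimal self-contained sketch of that call pattern, with invented names (Handler, events_logged) and none of the real signatures; it is not MariaDB code. The partition-level wrapper delegates the row to the per-partition handler's wrapper, and both wrappers end up logging the row event:

#include <cstdio>

static int events_logged= 0;

struct Handler
{
  Handler *child;                  // non-null for the partition-like wrapper
  void binlog_log_row() { ++events_logged; }
  void write_row()                 // engine-specific part
  {
    if (child)
      child->ha_write_row();       // the wrapper delegates the row change
  }
  void ha_write_row()              // the common wrapper around write_row()
  {
    write_row();
    binlog_log_row();              // the online-alter event is written here
  }
};

int main()
{
  Handler part_child{nullptr};     // plays the role of the engine handler
  Handler partition{&part_child};  // plays the role of ha_partition
  partition.ha_write_row();        // one row change...
  std::printf("events logged: %d\n", events_logged);   // ...prints 2
}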

There's no such problem in binlog/rpl, because ha_partition::row_logging
is normally set to false.

The fix makes the events replicate only when the handler is the root handler.
We *guess* this by comparing the handler to table->file; the same approach is
used in the MDEV-21540 fix, 231feabd. The assumption is that the row methods
are only called for table->file (and never for a cloned handler), so
assertions are added in ha_innobase and ha_myisam to make sure this holds,
at least for those engines.
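
To make the fix concrete, a toy continuation of the sketch above, again with invented names rather than the actual patch: binlog_log_row() only counts the event when the handler's handlerton matches that of table->file (the comparison the new is_root_handler() performs), and the write path asserts the stated assumption that a handler sharing table->file's handlerton is table->file itself, never a clone:

#include <cassert>
#include <cstdio>

struct Handlerton {} partition_hton, innodb_hton;
struct Table;
static int events_logged= 0;

struct Handler
{
  Handlerton *ht;                  // engine of this handler
  Table *table;
  Handler *child= nullptr;         // non-null for the partition-like wrapper
  bool is_root_handler() const;    // same comparison as in the patch
  void binlog_log_row();
  void write_row();
  void ha_write_row() { write_row(); binlog_log_row(); }
};

struct Table { Handler *file; };   // table->file is the root handler

bool Handler::is_root_handler() const
{
  return ht == table->file->ht;    // handlertons differ under ha_partition
}

void Handler::binlog_log_row()
{
  if (is_root_handler())           // the guard added by this fix
    events_logged++;
}

void Handler::write_row()
{
  // The assumption the commit relies on: if our handlerton equals the one
  // of table->file, then we *are* table->file (never a cloned handler).
  assert(ht != table->file->ht || this == table->file);
  if (child)
    child->ha_write_row();
}

int main()
{
  Table t;
  Handler part_child{&innodb_hton, &t};                // per-partition handler
  Handler partition{&partition_hton, &t, &part_child}; // ha_partition stand-in
  t.file= &partition;
  partition.ha_write_row();                            // one row change...
  std::printf("events logged: %d\n", events_logged);   // ...prints 1
}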

Also closes MDEV-31040; however, the test is not included, since we have no
convenient way to construct a deterministic test case for it.
parent 0775c7bd
@@ -1231,6 +1231,27 @@ a
223
889
drop table t1;
#
# MDEV-31033 ER_KEY_NOT_FOUND upon online COPY ALTER on a partitioned
# table
create table t (a int) partition by hash(a) partitions 2;
insert into t values (1),(3);
set debug_sync= 'alter_table_online_downgraded SIGNAL downgraded wait_for goon';
alter table t force, algorithm=copy, lock=none;
connection con1;
set debug_sync= 'now WAIT_FOR downgraded';
update t set a = a + 1;
insert t values (1),(2);
delete from t where a = 4 limit 1;
set debug_sync= 'now SIGNAL goon';
connection default;
select * from t;
a
2
2
1
drop table t;
set debug_sync= reset;
disconnect con1;
#
# End of 11.2 tests
@@ -1418,6 +1418,29 @@ select * from t1;
drop table t1;
--echo #
--echo # MDEV-31033 ER_KEY_NOT_FOUND upon online COPY ALTER on a partitioned
--echo # table
create table t (a int) partition by hash(a) partitions 2;
insert into t values (1),(3);
set debug_sync= 'alter_table_online_downgraded SIGNAL downgraded wait_for goon';
send alter table t force, algorithm=copy, lock=none;
--connection con1
set debug_sync= 'now WAIT_FOR downgraded';
update t set a = a + 1;
insert t values (1),(2);
delete from t where a = 4 limit 1;
set debug_sync= 'now SIGNAL goon';
--connection default
--reap
select * from t;
drop table t;
set debug_sync= reset;
--disconnect con1
--echo #
--echo # End of 11.2 tests
@@ -7290,7 +7290,7 @@ int handler::binlog_log_row(const uchar *before_record,
log_func, row_logging_has_trans);
#ifdef HAVE_REPLICATION
if (unlikely(!error && table->s->online_alter_binlog))
if (unlikely(!error && table->s->online_alter_binlog && is_root_handler()))
error= binlog_log_row_online_alter(table, before_record, after_record,
log_func);
#endif // HAVE_REPLICATION
@@ -7815,18 +7815,7 @@ int handler::ha_write_row(const uchar *buf)
if ((error= ha_check_overlaps(NULL, buf)))
DBUG_RETURN(error);
/*
NOTE: this != table->file is true in 3 cases:
1. under copy_partitions() (REORGANIZE PARTITION): that does not
require long unique check as it does not introduce new rows or new index.
2. under partition's ha_write_row() (INSERT): check_duplicate_long_entries()
was already done by ha_partition::ha_write_row(), no need to check it
again for each single partition.
3. under ha_mroonga::wrapper_write_row()
*/
if (table->s->long_unique_table && this == table->file)
if (table->s->long_unique_table && is_root_handler())
{
DBUG_ASSERT(inited == NONE || lookup_handler != this);
if ((error= check_duplicate_long_entries(buf)))
@@ -7877,14 +7866,7 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
uint saved_status= table->status;
error= ha_check_overlaps(old_data, new_data);
/*
NOTE: this != table->file is true under partition's ha_update_row():
check_duplicate_long_entries_update() was already done by
ha_partition::ha_update_row(), no need to check it again for each single
partition. Same applies to ha_mroonga wrapper.
*/
if (!error && table->s->long_unique_table && this == table->file)
if (!error && table->s->long_unique_table && is_root_handler())
error= check_duplicate_long_entries_update(new_data);
table->status= saved_status;
@@ -3557,6 +3557,11 @@ class handler :public Sql_alloc
if (org_keyread != MAX_KEY)
ha_start_keyread(org_keyread);
}
protected:
bool is_root_handler() const;
public:
int check_collation_compatibility();
int check_long_hash_compatibility() const;
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
@@ -7561,6 +7561,22 @@ inline bool handler::has_long_unique()
return table->s->long_unique_table;
}
/**
Return whether this handler is the root handler of its table.
@return false if the table is maintained by a different handlerton, true otherwise.
@note The implementation supposes that the same handler can't be found as both
root and non-root.
There are two known cases when it's non-root:
1. under partition's ha_write_row() (also true for copy_partitions())
2. under ha_mroonga::wrapper_write_row();
same applies for ha_delete_row/ha_update_row.
*/
inline bool handler::is_root_handler() const
{
return ht == table->file->ht;
}
extern pthread_attr_t *get_connection_attrib(void);
/**