Commit 435e0844 authored by unknown's avatar unknown

wl#3023 ndb to return correct tables for initial table maps

+ removed extra binlog events generated by drop table schema ops to produce predictable test cases


storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  ndb: dict use define for number of pages in table definition
parent 29c9ca33
......@@ -29,12 +29,6 @@ select inserts,updates,deletes from
cluster.binlog_index where epoch > @max_epoch and updates > 0;
inserts updates deletes
2 1 1
select schemaops from
cluster.binlog_index where epoch > @max_epoch and schemaops > 0;
schemaops
1
1
1
flush logs;
purge master logs before now();
select count(*) from cluster.binlog_index;
......@@ -55,8 +49,3 @@ select inserts,updates,deletes from
cluster.binlog_index where epoch > @max_epoch and inserts > 0;
inserts updates deletes
2 0 0
select schemaops from
cluster.binlog_index where epoch > @max_epoch and schemaops > 0;
schemaops
1
1
......@@ -23,10 +23,6 @@ reset master;
alter table t2 add column (b int);
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; alter table t2 add column (b int)
reset master;
reset master;
......@@ -35,10 +31,6 @@ drop table mysqltest.t1;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # ALTER DATABASE mysqltest CHARACTER SET latin1
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # cluster.apply_status
master-bin.000001 # Write_rows # #
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `mysqltest`; drop table `t1`
reset master;
reset master;
......@@ -118,10 +110,6 @@ master-bin1.000001 # Query # # DROP TABLESPACE ts1
ENGINE = NDB
master-bin1.000001 # Query # # DROP LOGFILE GROUP lg1
ENGINE =NDB
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; drop table `t1`
reset master;
show tables;
......@@ -138,25 +126,9 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin1.000001 # Query # # use `test`; create table t1 (a int key) engine=ndb
master-bin1.000001 # Query # # use `test`; create table t2 (a int key) engine=ndb
master-bin1.000001 # Query # # use `test`; create table t3 (a int key) engine=ndb
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; rename table `test.t3` to `test.t4`
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; rename table `test.t2` to `test.t3`
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; rename table `test.t1` to `test.t2`
master-bin1.000001 # Query # # BEGIN
master-bin1.000001 # Table_map # # cluster.apply_status
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
master-bin1.000001 # Query # # use `test`; rename table `test.t4` to `test.t1`
drop table t1;
drop table t2;
......@@ -171,6 +143,7 @@ create table t1 (a int key) engine=ndb;
insert into t1 values(1);
rename table t1 to t2;
insert into t2 values(2);
drop table t2;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin1.000001 # Query # # use `test`; create table t1 (a int key) engine=ndb
......@@ -187,4 +160,4 @@ master-bin1.000001 # Write_rows # #
master-bin1.000001 # Table_map # # test.t2
master-bin1.000001 # Write_rows # #
master-bin1.000001 # Query # # COMMIT
drop table t2;
master-bin1.000001 # Query # # use `test`; drop table `t2`
......@@ -33,11 +33,9 @@ cluster.binlog_index ORDER BY epoch DESC LIMIT 1;
SELECT inserts,updates,deletes,schemaops FROM
cluster.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
inserts updates deletes schemaops
0 0 0 1
drop table t1;
SHOW TABLES;
Tables_in_test
SELECT inserts,updates,deletes,schemaops FROM
cluster.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
inserts updates deletes schemaops
0 0 0 1
......@@ -83,12 +83,12 @@ master-bin.000002 # Write_rows 1 #
master-bin.000002 # Query 1 # COMMIT
show binary logs;
Log_name File_size
master-bin.000001 1722
master-bin.000002 603
master-bin.000001 1698
master-bin.000002 591
start slave;
show binary logs;
Log_name File_size
slave-bin.000001 1817
slave-bin.000001 1793
slave-bin.000002 198
show binlog events in 'slave-bin.000001' from 4;
Log_name Pos Event_type Server_id End_log_pos Info
......@@ -102,13 +102,13 @@ slave-bin.000001 # Write_rows 2 #
slave-bin.000001 # Query 2 # COMMIT
slave-bin.000001 # Query 1 # use `test`; drop table t1
slave-bin.000001 # Query 1 # use `test`; create table t1 (word char(20) not null)ENGINE=NDB
slave-bin.000001 # Query 1 # use `test`; create table t3 (a int)ENGINE=NDB
slave-bin.000001 # Query 2 # BEGIN
slave-bin.000001 # Table_map 2 # cluster.apply_status
slave-bin.000001 # Write_rows 2 #
slave-bin.000001 # Table_map 2 # test.t1
slave-bin.000001 # Write_rows 2 #
slave-bin.000001 # Query 2 # COMMIT
slave-bin.000001 # Query 1 # use `test`; create table t3 (a int)ENGINE=NDB
slave-bin.000001 # Rotate 2 # slave-bin.000002;pos=4
show binlog events in 'slave-bin.000002' from 4;
Log_name Pos Event_type Server_id End_log_pos Info
......@@ -122,7 +122,7 @@ slave-bin.000002 # Write_rows 2 #
slave-bin.000002 # Query 2 # COMMIT
show slave status;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 603 # # master-bin.000002 Yes Yes # 0 0 603 # None 0 No #
# 127.0.0.1 root MASTER_PORT 1 master-bin.000002 591 # # master-bin.000002 Yes Yes # 0 0 591 # None 0 No #
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
DROP TABLE t1;
......
......@@ -15,8 +15,8 @@ events : test case unstable. andrey will fix
#ndb_alter_table_row : sometimes wrong error 1015!=1046
ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
ndb_binlog_basic : Results are not deterministic, Tomas will fix
ndb_binlog_ddl_multi : Bug#17038 [PATCH PENDING]
#ndb_binlog_basic : Results are not deterministic, Tomas will fix
#ndb_binlog_ddl_multi : Bug#17038 [PATCH PENDING]
ndb_load : Bug#17233
partition_03ndb : Bug#16385
ps_7ndb : dbug assert in RBR mode when executing test suite
......@@ -31,7 +31,7 @@ rpl_ndb_delete_nowhere : Bug#17400: delete & update of rows in table without pk
rpl_ndb_innodb2ndb : Bugs#17400: delete & update of rows in table without pk fails
rpl_ndb_insert_ignore : Bugs: #17431: INSERT IGNORE INTO returns failed: 1296
rpl_ndb_myisam2ndb : Bugs#17400: delete & update of rows in table without pk fails
rpl_ndb_log : result not deterministic
#rpl_ndb_log : result not deterministic
rpl_ndb_relay_space : Bug#16993
rpl_ndb_multi_update2 : BUG#17738 In progress
rpl_ndb_multi_update3 : Bug#17400: delete & update of rows in table without pk fails
......
......@@ -42,8 +42,6 @@ select inserts from cluster.binlog_index where epoch > @max_epoch and inserts >
select deletes from cluster.binlog_index where epoch > @max_epoch and deletes > 5;
select inserts,updates,deletes from
cluster.binlog_index where epoch > @max_epoch and updates > 0;
select schemaops from
cluster.binlog_index where epoch > @max_epoch and schemaops > 0;
#
# check that purge clears the binlog_index
......@@ -72,5 +70,3 @@ drop table t1;
drop database mysqltest;
select inserts,updates,deletes from
cluster.binlog_index where epoch > @max_epoch and inserts > 0;
select schemaops from
cluster.binlog_index where epoch > @max_epoch and schemaops > 0;
......@@ -174,10 +174,9 @@ create table t1 (a int key) engine=ndb;
insert into t1 values(1);
rename table t1 to t2;
insert into t2 values(2);
drop table t2;
# now we should see data in table t1 _and_ t2
# prior to bug fix, data was missing for t2
--connection server2
--source include/show_binlog_events.inc
drop table t2;
......@@ -43,6 +43,7 @@ SELECT * FROM t2 ORDER BY a;
eval SELECT inserts,updates,deletes,schemaops FROM
cluster.binlog_index WHERE epoch=$the_epoch;
# single schema ops will not show
# drop the table on server1
DROP TABLE t2;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB;
......
......@@ -2651,7 +2651,8 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
return 0;
TABLE *table= share->table;
assert(table != 0);
DBUG_ASSERT(trans.good());
DBUG_ASSERT(table != 0);
dbug_print_table("table", table);
......@@ -3051,66 +3052,100 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Binlog_index_row row;
while (pOp != NULL)
{
gci= pOp->getGCI();
DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci));
// sometimes get TE_ALTER with invalid table
DBUG_ASSERT(pOp->getEventType() == NdbDictionary::Event::TE_ALTER ||
! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName()));
DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch);
ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
assert(pOp->getGCI() <= ndb_latest_received_binlog_epoch);
bzero((char*) &row, sizeof(row));
injector::transaction trans= inj->new_trans(thd);
{ // pass table map before epoch
Uint32 iter=0;
const NdbEventOperation* gci_op;
injector::transaction trans;
// pass table map before epoch
{
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
while ((gci_op=ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
NDB_SHARE* share=(NDB_SHARE*)gci_op->getCustomData();
DBUG_PRINT("info", ("per gci op %p share %p event types 0x%x",
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
DBUG_PRINT("info", ("per gci_op: %p share: %p event_types: 0x%x",
gci_op, share, event_types));
// workaround for interface returning TE_STOP events
// which are normally filtered out below in the nextEvent loop
if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
{
DBUG_PRINT("info", ("Skipped TE_STOP on table %s",
gci_op->getEvent()->getTable()->getName()));
continue;
}
// this should not happen
if (share == NULL || share->table == NULL)
{
DBUG_PRINT("info", ("no share or table !"));
DBUG_PRINT("info", ("no share or table %s!",
gci_op->getEvent()->getTable()->getName()));
continue;
}
TABLE* table=share->table;
const LEX_STRING& name=table->s->table_name;
if (share == apply_status_share)
{
// skip this table, it is handled specially
continue;
}
TABLE *table= share->table;
const LEX_STRING &name= table->s->table_name;
if ((event_types & (NdbDictionary::Event::TE_INSERT |
NdbDictionary::Event::TE_UPDATE |
NdbDictionary::Event::TE_DELETE)) == 0)
{
DBUG_PRINT("info", ("skipping non data event table: %.*s",
name.length, name.str));
continue;
}
if (!trans.good())
{
DBUG_PRINT("info",
("Found new data event, initializing transaction"));
inj->new_trans(thd, &trans);
}
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
injector::transaction::table tbl(table, true);
// TODO enable when mats patch pushed
//trans.use_table(::server_id, tbl);
}
}
gci= pOp->getGCI();
if (apply_status_share)
{
TABLE *table= apply_status_share->table;
const LEX_STRING& name=table->s->table_name;
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
injector::transaction::table tbl(table, true);
// TODO enable when mats patch pushed
//trans.use_table(::server_id, tbl);
MY_BITMAP b;
uint32 bitbuf;
DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8);
bitmap_init(&b, &bitbuf, table->s->fields, false);
bitmap_set_all(&b);
table->field[0]->store((longlong)::server_id);
table->field[1]->store((longlong)gci);
trans.write_row(::server_id,
injector::transaction::table(table, true),
&b, table->s->fields,
table->record[0]);
}
else
if (trans.good())
{
sql_print_error("NDB: Could not get apply status share");
if (apply_status_share)
{
TABLE *table= apply_status_share->table;
const LEX_STRING& name=table->s->table_name;
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
injector::transaction::table tbl(table, true);
// TODO enable when mats patch pushed
//trans.use_table(::server_id, tbl);
MY_BITMAP b;
uint32 bitbuf;
DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8);
bitmap_init(&b, &bitbuf, table->s->fields, false);
bitmap_set_all(&b);
table->field[0]->store((longlong)::server_id);
table->field[1]->store((longlong)gci);
trans.write_row(::server_id,
injector::transaction::table(table, true),
&b, table->s->fields,
table->record[0]);
}
else
{
sql_print_error("NDB: Could not get apply status share");
}
}
#ifdef RUN_NDB_BINLOG_TIMER
write_timer.start();
......@@ -3128,11 +3163,28 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
{
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
DBUG_PRINT("info",
("EVENT TYPE:%d GCI:%lld last applied: %lld "
"share: 0x%lx", pOp->getEventType(), gci,
ndb_latest_applied_binlog_epoch, share));
("EVENT TYPE: %d GCI: %lld last applied: %lld "
"share: 0x%lx (%s.%s)", pOp->getEventType(), gci,
ndb_latest_applied_binlog_epoch, share,
share ? share->db : "share == NULL",
share ? share->table_name : ""));
DBUG_ASSERT(share != 0);
}
// assert that there is consistency between the gci op list
// and the event list
{
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
if (gci_op == pOp)
break;
}
DBUG_ASSERT(gci_op == pOp);
DBUG_ASSERT((event_types & pOp->getEventType()) != 0);
}
#endif
if ((unsigned) pOp->getEventType() <
(unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
......@@ -3140,8 +3192,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
else
{
// set injector_ndb database/schema from table internal name
int ret= ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
assert(ret == 0);
int ret=
ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
DBUG_ASSERT(ret == 0);
ndb_binlog_thread_handle_non_data_event(ndb, pOp, row);
// reset to catch errors
ndb->setDatabaseName("");
......@@ -3158,13 +3211,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
write_timer.stop();
#endif
if (row.n_inserts || row.n_updates
|| row.n_deletes || row.n_schemaops)
if (trans.good())
{
DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes);
injector::transaction::binlog_pos start= trans.start_pos();
if (int r= trans.commit())
{
sql_print_error("NDB binlog:"
sql_print_error("NDB binlog: "
"Error during COMMIT of GCI. Error: %d",
r);
/* TODO: Further handling? */
......@@ -3173,13 +3226,11 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
row.master_log_file= start.file_name();
row.master_log_pos= start.file_pos();
DBUG_PRINT("info",("COMMIT gci %lld",gci));
DBUG_PRINT("info", ("COMMIT gci: %lld", gci));
if (ndb_update_binlog_index)
ndb_add_binlog_index(thd, &row);
ndb_latest_applied_binlog_epoch= gci;
}
else
trans.commit();
ndb_latest_handled_binlog_epoch= gci;
#ifdef RUN_NDB_BINLOG_TIMER
gci_timer.stop();
......
......@@ -43,6 +43,9 @@ injector::transaction::transaction(MYSQL_LOG *log, THD *thd)
injector::transaction::~transaction()
{
if (!good())
return;
/* Needed since my_free expects a 'char*' (instead of 'void*'). */
char* const the_memory= const_cast<char*>(m_start_pos.m_file_name);
......
......@@ -348,7 +348,7 @@ void Dbdict::packTableIntoPages(Signal* signal)
memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
8 * ZSIZE_OF_PAGES_IN_WORDS);
ZMAX_PAGES_OF_TABLE_DEFINITION * ZSIZE_OF_PAGES_IN_WORDS);
w.first();
switch((DictTabInfo::TableType)type) {
case DictTabInfo::SystemTable:
......
......@@ -1173,15 +1173,17 @@ NdbEventBuffer::nextEvent()
NdbEventOperationImpl*
NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types)
{
DBUG_ENTER("NdbEventBuffer::getGCIEventOperations");
EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops();
if (*iter < gci_ops->m_gci_op_count)
{
EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++];
if (event_types != NULL)
*event_types = g.event_types;
return g.op;
DBUG_PRINT("info", ("gci: %d", (unsigned)gci_ops->m_gci));
DBUG_RETURN(g.op);
}
return NULL;
DBUG_RETURN(NULL);
}
void
......@@ -1647,11 +1649,19 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
else
{
// event with same op, PK found, merge into old buffer
Uint32 old_op = data->sdata->operation;
if (unlikely(merge_data(sdata, ptr, data)))
{
op->m_has_error = 3;
DBUG_RETURN_EVENT(-1);
}
Uint32 new_op = data->sdata->operation;
// make Gci_ops reflect the merge by delete old and add new
EventBufData_list::Gci_op g = { op, (1 << old_op) };
// bucket->m_data.del_gci_op(g); // XXX what's wrong? fix later
g.event_types = (1 << new_op);
bucket->m_data.add_gci_op(g);
}
DBUG_RETURN_EVENT(0);
}
......@@ -2184,7 +2194,7 @@ void EventBufData_list::append_list(EventBufData_list *list, Uint64 gci)
}
void
EventBufData_list::add_gci_op(Gci_op g)
EventBufData_list::add_gci_op(Gci_op g, bool del)
{
assert(g.op != NULL);
Uint32 i;
......@@ -2193,7 +2203,10 @@ EventBufData_list::add_gci_op(Gci_op g)
break;
}
if (i < m_gci_op_count) {
m_gci_op_list[i].event_types |= g.event_types;
if (! del)
m_gci_op_list[i].event_types |= g.event_types;
else
m_gci_op_list[i].event_types &= ~ g.event_types;
} else {
if (m_gci_op_count == m_gci_op_alloc) {
Uint32 n = 1 + 2 * m_gci_op_alloc;
......@@ -2207,6 +2220,7 @@ EventBufData_list::add_gci_op(Gci_op g)
m_gci_op_alloc = n;
}
assert(m_gci_op_count < m_gci_op_alloc);
assert(! del);
m_gci_op_list[m_gci_op_count++] = g;
}
}
......
......@@ -129,9 +129,11 @@ public:
};
Gci_ops *first_gci_ops();
Gci_ops *next_gci_ops();
private:
// case 1 above; add Gci_op to single list
void add_gci_op(Gci_op g);
void add_gci_op(Gci_op g, bool del = false);
// delete bit from existing flags
void del_gci_op(Gci_op g) { add_gci_op(g, true); }
private:
// case 2 above; move single list or multi list from
// one list to another
void move_gci_ops(EventBufData_list *list, Uint64 gci);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment