Commit 7141bfd0 authored by unknown

Merge ahristov@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into lmy004.:/work/mysql-5.1-bug16406

parents 89f598ee bd24b49e
@@ -8,8 +8,6 @@ a b c
 2 two two
 alter table t1 drop index c;
 select * from t1 where c = 'two';
-ERROR HY000: Table definition has changed, please retry transaction
-select * from t1 where c = 'two';
 a b c
 2 two two
 drop table t1;
......
@@ -30,14 +30,6 @@ drop table t1;
 create table t1 (a int) engine=ndbcluster;
 insert into t1 value (2);
 select * from t1;
-ERROR HY000: Table definition has changed, please retry transaction
-show warnings;
-Level Code Message
-Error 1296 Got error 241 'Invalid schema object version' from NDB
-Error 1412 Table definition has changed, please retry transaction
-Error 1105 Unknown error
-flush table t1;
-select * from t1;
 a
 2
 flush status;
@@ -58,15 +50,9 @@ a
 select * from t3;
 a b c last_col
 1 Hi! 89 Longtext column
-show status like 'handler_discover%';
-Variable_name Value
-Handler_discover 1
 show tables like 't4';
 Tables_in_test (t4)
 t4
-show status like 'handler_discover%';
-Variable_name Value
-Handler_discover 2
 show tables;
 Tables_in_test
 t1
@@ -74,4 +60,3 @@ t2
 t3
 t4
 drop table t1, t2, t3, t4;
-drop table t1, t3, t4;
@@ -17,8 +17,6 @@ select * from t1 where c = 'two';
 connection server1;
 alter table t1 drop index c;
 connection server2;
--- error 1412
-select * from t1 where c = 'two';
 select * from t1 where c = 'two';
 connection server1;
 drop table t1;
......
@@ -41,11 +41,12 @@ drop table t1;
 create table t1 (a int) engine=ndbcluster;
 insert into t1 value (2);
 connection server1;
-# Currently a retry is required remotely
---error 1412
-select * from t1;
-show warnings;
-flush table t1;
+## Currently a retry is required remotely
+#--error 1412
+#select * from t1;
+#show warnings;
+#flush table t1;
+# Table definition change should be propagated automatically
 select * from t1;
 # Connect to server2 and use the tables from there
@@ -65,13 +66,9 @@ create table t4 (pk int primary key, b int) engine=ndb;
 connection server1;
 select * from t1;
 select * from t3;
-show status like 'handler_discover%';
 show tables like 't4';
-show status like 'handler_discover%';
 show tables;
 drop table t1, t2, t3, t4;
-connection server2;
-drop table t1, t3, t4;
 # End of 4.1 tests
@@ -34,6 +34,7 @@
 #include <ndbapi/NdbIndexStat.hpp>
 #include "ha_ndbcluster_binlog.h"
+#include "ha_ndbcluster_tables.h"
 #ifdef ndb_dynamite
 #undef assert
@@ -476,8 +477,7 @@ ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
 #ifdef HAVE_NDB_BINLOG
 char key[FN_REFLEN];
-strxnmov(key, FN_LEN-1, mysql_data_home, "/",
-dbname, "/", tabname, NullS);
+build_table_filename(key, sizeof(key), dbname, tabname, "");
 DBUG_PRINT("info", ("Getting ndbcluster mutex"));
 pthread_mutex_lock(&ndbcluster_mutex);
 NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
@@ -4230,16 +4230,14 @@ int ha_ndbcluster::create(const char *name,
 NDBCOL col;
 uint pack_length, length, i, pk_length= 0;
 const void *data, *pack_data;
-char name2[FN_HEADLEN];
 bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
 DBUG_ENTER("ha_ndbcluster::create");
 DBUG_PRINT("enter", ("name: %s", name));
-strcpy(name2, name);
-DBUG_ASSERT(*fn_rext((char*)name2) == 0);
-set_dbname(name2);
-set_tabname(name2);
+DBUG_ASSERT(*fn_rext((char*)name) == 0);
+set_dbname(name);
+set_tabname(name);
 table= form;
 if (create_from_engine)
@@ -4252,7 +4250,7 @@ int ha_ndbcluster::create(const char *name,
 if ((my_errno= write_ndb_file(name)))
 DBUG_RETURN(my_errno);
 #ifdef HAVE_NDB_BINLOG
-ndbcluster_create_binlog_setup(get_ndb(), name2, strlen(name2),
+ndbcluster_create_binlog_setup(get_ndb(), name, strlen(name),
 m_dbname, m_tabname, FALSE);
 #endif /* HAVE_NDB_BINLOG */
 DBUG_RETURN(my_errno);
@@ -4400,18 +4398,18 @@ int ha_ndbcluster::create(const char *name,
 First make sure we get a "fresh" share here, not an old trailing one...
 */
 {
-const char *key= name2;
-uint length= (uint) strlen(key);
+uint length= (uint) strlen(name);
 if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-(byte*) key, length)))
+(byte*) name, length)))
 handle_trailing_share(share);
 }
 /*
 get a new share
 */
-if (!(share= get_share(name2, form, true, true)))
+if (!(share= get_share(name, form, true, true)))
 {
-sql_print_error("NDB: allocating table share for %s failed", name2);
+sql_print_error("NDB: allocating table share for %s failed", name);
 /* my_errno is set */
 }
 pthread_mutex_unlock(&ndbcluster_mutex);
@@ -4421,6 +4419,12 @@ int ha_ndbcluster::create(const char *name,
 const NDBTAB *t= dict->getTable(m_tabname);
 String event_name(INJECTOR_EVENT_LEN);
 ndb_rep_event_name(&event_name,m_dbname,m_tabname);
+int do_event_op= ndb_binlog_running;
+if (!schema_share &&
+strcmp(share->db, NDB_REP_DB) == 0 &&
+strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+do_event_op= 1;
 /*
 Always create an event for the table, as other mysql servers
@@ -4429,7 +4433,7 @@ int ha_ndbcluster::create(const char *name,
 if (ndbcluster_create_event(ndb, t, event_name.c_ptr(), share) < 0)
 {
 /* this is only a serious error if the binlog is on */
-if (share && ndb_binlog_running)
+if (share && do_event_op)
 {
 push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
 ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
@@ -4442,14 +4446,14 @@ int ha_ndbcluster::create(const char *name,
 sql_print_information("NDB Binlog: CREATE TABLE Event: %s",
 event_name.c_ptr());
-if (share && ndb_binlog_running &&
+if (share && do_event_op &&
 ndbcluster_create_event_ops(share, t, event_name.c_ptr()) < 0)
 {
 sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations."
-" Event: %s", name2);
+" Event: %s", name);
 /* a warning has been issued to the client */
 }
-if (share && !ndb_binlog_running)
+if (share && !do_event_op)
 share->flags|= NSF_NO_BINLOG;
 ndbcluster_log_schema_op(current_thd, share,
 current_thd->query, current_thd->query_length,
@@ -4732,9 +4736,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
 ERR_RETURN(dict->getNdbError());
 }
 #ifdef HAVE_NDB_BINLOG
-NDB_SHARE *share= 0;
-if (ndb_binlog_running &&
-(share= get_share(from, 0, false)))
+NDB_SHARE *share= get_share(from, 0, false);
+if (share)
 {
 int r= rename_share(share, to);
 DBUG_ASSERT(r == 0);
@@ -4795,7 +4798,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
 if (ndb_extra_logging)
 sql_print_information("NDB Binlog: RENAME Event: %s",
 event_name.c_ptr());
-if (share)
+if (share && ndb_binlog_running)
 {
 if (ndbcluster_create_event_ops(share, ndbtab,
 event_name.c_ptr()) < 0)
@@ -5319,7 +5322,7 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
 NDBDICT* dict= ndb->getDictionary();
 dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
 dict->invalidateTable(name);
-strxnmov(key, FN_LEN-1, mysql_data_home, "/", db, "/", name, NullS);
+build_table_filename(key, sizeof(key), db, name, "");
 NDB_SHARE *share= get_share(key, 0, false);
 if (share && get_ndb_share_state(share) == NSS_ALTERED)
 {
@@ -5453,13 +5456,14 @@ int ndbcluster_drop_database_impl(const char *path)
 }
 // Drop any tables belonging to database
 char full_path[FN_REFLEN];
-char *tmp= strxnmov(full_path, FN_REFLEN-1, share_prefix, dbname, "/",
-NullS);
+char *tmp= full_path +
+build_table_filename(full_path, sizeof(full_path), dbname, "", "");
 ndb->setDatabaseName(dbname);
 List_iterator_fast<char> it(drop_list);
 while ((tabname=it++))
 {
-strxnmov(tmp, FN_REFLEN - (tmp - full_path)-1, tabname, NullS);
+tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
 if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
 {
 const NdbError err= dict->getNdbError();
@@ -5552,14 +5556,16 @@ int ndbcluster_find_all_files(THD *thd)
 continue;
 /* check if database exists */
-char *end= strxnmov(key, FN_LEN-1, mysql_data_home, "/",
-elmt.database, NullS);
+char *end= key +
+build_table_filename(key, sizeof(key), elmt.database, "", "");
 if (my_access(key, F_OK))
 {
 /* no such database defined, skip table */
 continue;
 }
-end= strxnmov(end, FN_LEN-1-(end-key), "/", elmt.name, NullS);
+/* finalize construction of path */
+end+= tablename_to_filename(elmt.name, end,
+sizeof(key)-(end-key));
 const void *data= 0, *pack_data= 0;
 uint length, pack_length;
 int discover= 0;
@@ -5694,10 +5700,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
 }
 // File is not in NDB, check for .ndb file with this name
-(void)strxnmov(name, FN_REFLEN-1,
-mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS);
+build_table_filename(name, sizeof(name), db, file_name, ha_ndb_ext);
 DBUG_PRINT("info", ("Check access for %s", name));
-if (access(name, F_OK))
+if (my_access(name, F_OK))
 {
 DBUG_PRINT("info", ("%s did not exist on disk", name));
 // .ndb file did not exist on disk, another table type
@@ -5719,12 +5724,13 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
 #ifdef HAVE_NDB_BINLOG
 /* setup logging to binlog for all discovered tables */
 {
-char *end, *end1=
-strxnmov(name, sizeof(name), mysql_data_home, "/", db, "/", NullS);
+char *end, *end1= name +
+build_table_filename(name, sizeof(name), db, "", "");
 for (i= 0; i < ok_tables.records; i++)
 {
 file_name= (char*)hash_element(&ok_tables, i);
-end= strxnmov(end1, sizeof(name) - (end1 - name), file_name, NullS);
+end= end1 +
+tablename_to_filename(file_name, end1, sizeof(name) - (end1 - name));
 pthread_mutex_lock(&LOCK_open);
 ndbcluster_create_binlog_setup(ndb, name, end-name,
 db, file_name, TRUE);
@@ -5741,9 +5747,8 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
 file_name= hash_element(&ndb_tables, i);
 if (!hash_search(&ok_tables, file_name, strlen(file_name)))
 {
-strxnmov(name, sizeof(name)-1,
-mysql_data_home, "/", db, "/", file_name, reg_ext, NullS);
-if (access(name, F_OK))
+build_table_filename(name, sizeof(name), db, file_name, reg_ext);
+if (my_access(name, F_OK))
 {
 DBUG_PRINT("info", ("%s must be discovered", file_name));
 // File is in list of ndb tables and not in ok_tables
@@ -6277,7 +6282,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
 NDB_SHARE *share;
 DBUG_ENTER("ndb_get_commitcount");
-(void)strxnmov(name, FN_REFLEN-1, share_prefix, dbname, "/", tabname, NullS);
+build_table_filename(name, sizeof(name), dbname, tabname, "");
 DBUG_PRINT("enter", ("name: %s", name));
 pthread_mutex_lock(&ndbcluster_mutex);
 if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
@@ -6655,6 +6660,8 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
 ("db.tablename: %s.%s use_count: %d commit_count: %d",
 share->db, share->table_name,
 share->use_count, share->commit_count));
+if (share->table)
+{
 DBUG_PRINT("rename_share",
 ("table->s->db.table_name: %s.%s",
 share->table->s->db.str, share->table->s->table_name.str));
@@ -6666,6 +6673,7 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
 share->table->s->table_name.str= share->table_name;
 share->table->s->table_name.length= strlen(share->table_name);
 }
+}
 /* else rename will be handled when the ALTER event comes */
 share->old_names= old_key;
 // ToDo free old_names after ALTER EVENT
......
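The ha_ndbcluster.cc hunks above repeatedly replace hand-rolled strxnmov() path building under mysql_data_home with build_table_filename() and tablename_to_filename(). A minimal stand-alone sketch of that calling pattern follows; the helper bodies here are simplified stand-ins (the real MySQL helpers also handle filename encoding), so only the call shape matches the diff:

```cpp
// Toy stand-ins for the MySQL helpers used in the diff above; the real
// functions live in the server sources and also encode unsafe characters.
#include <cstdio>
#include <cstring>

static size_t build_table_filename(char *buf, size_t size,
                                   const char *db, const char *table,
                                   const char *ext)
{
  // Returns the length written, so callers can keep a pointer to the end.
  return (size_t) snprintf(buf, size, "./data/%s/%s%s", db, table, ext);
}

static size_t tablename_to_filename(const char *table, char *to, size_t size)
{
  return (size_t) snprintf(to, size, "%s", table);
}

int main()
{
  char full_path[512];
  // Same shape as the ndbcluster_drop_database_impl() hunk: build the
  // database part once, then append each table name behind it.
  char *tmp= full_path +
    build_table_filename(full_path, sizeof(full_path), "test", "", "");
  const char *drop_list[]= { "t1", "t2" };
  for (const char *tabname : drop_list)
  {
    tablename_to_filename(tabname, tmp, sizeof(full_path) - (tmp - full_path));
    printf("%s\n", full_path);   // ./data/test/t1  then  ./data/test/t2
  }
  return 0;
}
```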
@@ -237,10 +237,33 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
 {
 THD *thd= current_thd;
 MEM_ROOT *mem_root= &share->mem_root;
+int do_event_op= ndb_binlog_running;
 share->op= 0;
 share->table= 0;
-if (!ndb_binlog_running)
+if (!schema_share &&
+strcmp(share->db, NDB_REP_DB) == 0 &&
+strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+do_event_op= 1;
+{
+int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
+share->subscriber_bitmap= (MY_BITMAP*)
+alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP));
+for (i= 0; i < no_nodes; i++)
+{
+bitmap_init(&share->subscriber_bitmap[i],
+(Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
+max_ndb_nodes, false);
+bitmap_clear_all(&share->subscriber_bitmap[i]);
+}
+bitmap_init(&share->slock_bitmap, share->slock,
+sizeof(share->slock)*8, false);
+bitmap_clear_all(&share->slock_bitmap);
+}
+if (!do_event_op)
 {
 if (_table)
 {
@@ -315,21 +338,6 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
 share->ndb_value[1]= (NdbValue*)
 alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
 +1 /*extra for hidden key*/);
-{
-int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
-share->subscriber_bitmap= (MY_BITMAP*)
-alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP));
-for (i= 0; i < no_nodes; i++)
-{
-bitmap_init(&share->subscriber_bitmap[i],
-(Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
-max_ndb_nodes, false);
-bitmap_clear_all(&share->subscriber_bitmap[i]);
-}
-bitmap_init(&share->slock_bitmap, share->slock,
-sizeof(share->slock)*8, false);
-bitmap_clear_all(&share->slock_bitmap);
-}
 if (table->s->primary_key == MAX_KEY)
 share->flags|= NSF_HIDDEN_PK;
 if (table->s->blob_fields != 0)
@@ -648,11 +656,8 @@ static int ndbcluster_create_apply_status_table(THD *thd)
 if so, remove it since there is none in Ndb
 */
 {
-strxnmov(buf, sizeof(buf),
-mysql_data_home,
-"/" NDB_REP_DB "/" NDB_APPLY_TABLE,
-reg_ext, NullS);
-unpack_filename(buf,buf);
+build_table_filename(buf, sizeof(buf),
+NDB_REP_DB, NDB_APPLY_TABLE, reg_ext);
 my_delete(buf, MYF(0));
 }
@@ -700,11 +705,8 @@ static int ndbcluster_create_schema_table(THD *thd)
 if so, remove it since there is none in Ndb
 */
 {
-strxnmov(buf, sizeof(buf),
-mysql_data_home,
-"/" NDB_REP_DB "/" NDB_SCHEMA_TABLE,
-reg_ext, NullS);
-unpack_filename(buf,buf);
+build_table_filename(buf, sizeof(buf),
+NDB_REP_DB, NDB_SCHEMA_TABLE, reg_ext);
 my_delete(buf, MYF(0));
 }
@@ -929,8 +931,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
 if (get_a_share)
 {
 char key[FN_REFLEN];
-(void)strxnmov(key, FN_REFLEN, share_prefix, db,
-"/", table_name, NullS);
+build_table_filename(key, sizeof(key), db, table_name, "");
 share= get_share(key, 0, false, false);
 }
@@ -1358,6 +1359,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
 switch (ev_type)
 {
 case NDBEVENT::TE_UPDATE:
+/* fall through */
 case NDBEVENT::TE_INSERT:
 {
 Cluster_replication_schema *schema= (Cluster_replication_schema *)
@@ -1375,21 +1377,20 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
 {
 case SOT_DROP_TABLE:
 /* binlog dropping table after any table operations */
+if (ndb_binlog_running)
 post_epoch_log_list->push_back(schema, mem_root);
 log_query= 0;
 break;
 case SOT_RENAME_TABLE:
 /* fall through */
 case SOT_ALTER_TABLE:
-/* fall through */
-if (!ndb_binlog_running)
+if (ndb_binlog_running)
 {
 log_query= 1;
 break; /* discovery will be handled by binlog */
 }
 /* fall through */
 case SOT_CREATE_TABLE:
-/* fall through */
 pthread_mutex_lock(&LOCK_open);
 if (ndb_create_table_from_engine(thd, schema->db, schema->name))
 {
@@ -1407,6 +1408,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
 TRUE, /* print error */
 TRUE); /* don't binlog the query */
 /* binlog dropping database after any table operations */
+if (ndb_binlog_running)
 post_epoch_log_list->push_back(schema, mem_root);
 log_query= 0;
 break;
@@ -1422,8 +1424,8 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
 case SOT_CLEAR_SLOCK:
 {
 char key[FN_REFLEN];
-(void)strxnmov(key, FN_REFLEN, share_prefix, schema->db,
-"/", schema->name, NullS);
+build_table_filename(key, sizeof(key),
+schema->db, schema->name, "");
 NDB_SHARE *share= get_share(key, 0, false, false);
 if (share)
 {
@@ -1463,7 +1465,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
 }
 }
-if (log_query)
+if (log_query && ndb_binlog_running)
 {
 char *thd_db_save= thd->db;
 thd->db= schema->db;
@@ -1752,6 +1754,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
 const char *table_name,
 my_bool share_may_exist)
 {
+int do_event_op= ndb_binlog_running;
 DBUG_ENTER("ndbcluster_create_binlog_setup");
 DBUG_PRINT("enter",("key: %s key_len: %d %s.%s share_may_exist: %d",
 key, key_len, db, table_name, share_may_exist));
@@ -1792,7 +1795,12 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
 "allocating table share for %s failed", key);
 }
-if (!ndb_binlog_running)
+if (!schema_share &&
+strcmp(share->db, NDB_REP_DB) == 0 &&
+strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+do_event_op= 1;
+if (!do_event_op)
 {
 share->flags|= NSF_NO_BINLOG;
 pthread_mutex_unlock(&ndbcluster_mutex);
......
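The ha_ndbcluster_binlog.cc hunks introduce a do_event_op flag so that event operations are still created for the schema distribution table even when the binlog is not running. A minimal sketch of just that gating decision, with placeholder types and placeholder values for NDB_REP_DB and NDB_SCHEMA_TABLE (the real ones come from ha_ndbcluster_tables.h):

```cpp
#include <cstring>

// Placeholders standing in for the real server state and constants.
#define NDB_REP_DB       "cluster"   /* placeholder value for this sketch */
#define NDB_SCHEMA_TABLE "schema"    /* placeholder value for this sketch */

struct NDB_SHARE { const char *db; const char *table_name; };

static int   ndb_binlog_running= 0;  // binlog injector not (yet) running
static void *schema_share= nullptr;  // set once the schema table is subscribed

// Mirrors the added logic: default to the binlog state, but always subscribe
// to the schema distribution table so DDL still propagates between servers.
static int decide_do_event_op(const NDB_SHARE *share)
{
  int do_event_op= ndb_binlog_running;
  if (!schema_share &&
      strcmp(share->db, NDB_REP_DB) == 0 &&
      strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
    do_event_op= 1;
  return do_event_op;
}

int main()
{
  NDB_SHARE schema_tab= { NDB_REP_DB, NDB_SCHEMA_TABLE };
  NDB_SHARE user_tab=   { "test", "t1" };
  // Even with the binlog off, the schema table gets event operations;
  // ordinary user tables do not.
  return (decide_do_event_op(&schema_tab) == 1 &&
          decide_do_event_op(&user_tab) == 0) ? 0 : 1;
}
```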