Commit d6c1a39a authored by unknown

fix for invalidating cached table definition if it mismatches the frm file

    removed debug printout
    new test in alter table for dictionary updates with multiple connections
    added choice of setting MaxNoOfOrderedIndexes
    added option to run "--small-bench"


mysql-test/mysql-test-run.sh:
  added option to run "--small-bench"
mysql-test/ndb/ndb_config_2_node.ini:
  added choice of setting MaxNoOfOrderedIndexes
mysql-test/ndb/ndbcluster.sh:
  added choice of setting MaxNoOfOrderedIndexes
mysql-test/r/ndb_alter_table.result:
  new test in alter table for dictionary updates with multiple connections
mysql-test/t/ndb_alter_table.test:
  new test in alter table for dictionary updates with multiple connections
ndb/src/ndbapi/DictCache.cpp:
  removed debug printout
sql/ha_ndbcluster.cc:
  fix for invalidating cached table definition if it mismatches the frm file
parent 5ef32b58

mysql-test/mysql-test-run.sh:
@@ -295,6 +295,11 @@ while test $# -gt 0; do
   --record)
     RECORD=1;
     EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" ;;
+  --small-bench)
+    DO_SMALL_BENCH=1
+    DO_BENCH=1
+    NO_SLAVE=1
+    ;;
   --bench)
     DO_BENCH=1
     NO_SLAVE=1
@@ -1451,7 +1456,13 @@ then
   if [ -z "$USE_RUNNING_NDBCLUSTER" ]
   then
     echo "Starting ndbcluster"
-    ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
+    if [ "$DO_BENCH" = 1 ]
+    then
+      NDBCLUSTER_OPTS=""
+    else
+      NDBCLUSTER_OPTS="--small"
+    fi
+    ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT $NDBCLUSTER_OPTS --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
     USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"host=localhost:$NDBCLUSTER_PORT\""
   else
     USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$USE_RUNNING_NDBCLUSTER\""
@@ -1485,9 +1496,14 @@ if [ "$DO_BENCH" = 1 ]
 then
   start_master
+  if [ "$DO_SMALL_BENCH" = 1 ]
+  then
+    EXTRA_BENCH_ARGS="--small-test --small-tables"
+  fi
   if [ ! -z "$USE_NDBCLUSTER" ]
   then
-    EXTRA_BENCH_ARGS="--create-options=TYPE=ndb"
+    EXTRA_BENCH_ARGS="--create-options=TYPE=ndb $EXTRA_BENCH_ARGS"
   fi
   BENCHDIR=$BASEDIR/sql-bench/
@@ -1495,7 +1511,7 @@ then
   cd $BENCHDIR
   if [ -z "$1" ]
   then
-    ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS
+    ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS --log
   else
     if [ -x "./$1" ]
     then
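
With this change a reduced benchmark run can be started straight from the test driver. A minimal usage sketch, run from the mysql-test directory; any extra server options are left out:

  # --small-bench implies --bench and skips the slave; it passes
  # --small-test --small-tables to run-all-tests, and in bench mode the
  # ndbcluster script is started without its --small option.
  cd mysql-test
  ./mysql-test-run.sh --small-bench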

mysql-test/ndb/ndb_config_2_node.ini:
@@ -6,6 +6,7 @@ IndexMemory= CHOOSE_IndexMemory
 Diskless= CHOOSE_Diskless
 TimeBetweenWatchDogCheck= 30000
 DataDir= CHOOSE_FILESYSTEM
+MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes

 [ndbd]
 HostName= CHOOSE_HOSTNAME_1

mysql-test/ndb/ndbcluster.sh:
@@ -44,7 +44,8 @@ initial_ndb=
 status_ndb=
 ndb_diskless=0
-ndb_con_op=100000
+ndb_no_ord=512
+ndb_con_op=10000
 ndb_dmem=80M
 ndb_imem=24M
@@ -65,6 +66,7 @@ while test $# -gt 0; do
     status_ndb=1
     ;;
   --small)
+    ndb_no_ord=128
     ndb_con_op=10000
     ndb_dmem=40M
     ndb_imem=12M
@@ -128,6 +130,7 @@ port_transporter=`expr $ndb_mgmd_port + 2`
 if [ $initial_ndb ] ; then
   sed \
+    -e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \
     -e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \
     -e s,"CHOOSE_DataMemory","$ndb_dmem",g \
     -e s,"CHOOSE_IndexMemory","$ndb_imem",g \
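
The new setting follows the existing CHOOSE_* templating in ndbcluster.sh: the script picks a value for MaxNoOfOrderedIndexes (512 by default, 128 with --small) and sed substitutes it into the config template. A standalone sketch of that substitution; the output path is illustrative, the real script writes the generated file into its own data directory and applies the full list of -e expressions:

  # Reproduce the placeholder substitution by hand (illustrative paths).
  ndb_no_ord=512      # ndbcluster.sh default; its --small option uses 128
  ndb_con_op=10000    # default MaxNoOfConcurrentOperations
  sed \
    -e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \
    -e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \
    ndb_config_2_node.ini > /tmp/config.ini
  # /tmp/config.ini now contains the line:  MaxNoOfOrderedIndexes= 512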

mysql-test/r/ndb_alter_table.result:
@@ -73,3 +73,22 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8
 1 101 3 4 5 PENDING 0000-00-00 00:00:00
 2 102 4 3 5 99 PENDING EXTRA 2004-01-01 00:00:00
 drop table t1;
+CREATE TABLE t1 (
+a INT NOT NULL,
+b INT NOT NULL
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (9410,9412);
+ALTER TABLE t1 ADD COLUMN c int not null;
+select * from t1;
+a b c
+9410 9412 0
+select * from t1;
+a b c
+9410 9412 0
+alter table t1 drop c;
+select * from t1;
+a b
+9410 9412
+drop table t1;
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist

mysql-test/t/ndb_alter_table.test:
@@ -49,6 +49,37 @@ show table status;
 select * from t1 order by col1;
 drop table t1;
+#
+# Check that invalidating dictionary cache works
+#
+CREATE TABLE t1 (
+a INT NOT NULL,
+b INT NOT NULL
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (9410,9412);
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+connection con1;
+ALTER TABLE t1 ADD COLUMN c int not null;
+select * from t1;
+connection con2;
+select * from t1;
+alter table t1 drop c;
+connection con1;
+select * from t1;
+drop table t1;
+connection con2;
+--error 1146
+select * from t1;
 #--disable_warnings
 #DROP TABLE IF EXISTS t2;
 #--enable_warnings
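
The new multi-connection dictionary-cache test can be exercised on its own by passing the test name to the driver; whether additional NDB-related options are needed depends on how the server was built, so treat this as a sketch:

  # Run only the ndb_alter_table test; --record regenerates the .result file.
  cd mysql-test
  ./mysql-test-run.sh ndb_alter_table
  # ./mysql-test-run.sh --record ndb_alter_table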

ndb/src/ndbapi/DictCache.cpp:
@@ -70,27 +70,6 @@ LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){
 void
 LocalDictCache::drop(const char * name){
   Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name));
-#ifndef DBUG_OFF
-  if (info == 0) {
-    ndbout_c("LocalDictCache::drop(%s) info==0", name);
-    ndbout_c("dump begin");
-    NdbElement_t<Ndb_local_table_info> * curr = m_tableHash.getNext(0);
-    while(curr != 0){
-      Ndb_local_table_info *tmp = curr->theData;
-      if (tmp) {
-        ndbout_c("m_table_impl=0x%x, id=%d, name=%s",
-                 tmp->m_table_impl,
-                 tmp->m_table_impl->m_tableId,
-                 tmp->m_table_impl->getName());
-      } else {
-        ndbout_c("NULL");
-      }
-      curr = m_tableHash.getNext(curr);
-    }
-    ndbout_c("dump end");
-  }
-#endif
   DBUG_ASSERT(info != 0);
   Ndb_local_table_info::destroy(info);
 }

sql/ha_ndbcluster.cc:
@@ -611,33 +611,19 @@ int ha_ndbcluster::get_metadata(const char *path)
 {
   NDBDICT *dict= m_ndb->getDictionary();
   const NDBTAB *tab;
-  const void *data, *pack_data;
-  const char **key_name;
-  uint ndb_columns, mysql_columns, length, pack_length;
   int error;
+  bool invalidating_ndb_table= false;
   DBUG_ENTER("get_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
+  do {
+    const void *data, *pack_data;
+    uint length, pack_length;
   if (!(tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
   DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
-  /*
-    This is the place to check that the table we got from NDB
-    is equal to the one on local disk
-  */
-  ndb_columns= (uint) tab->getNoOfColumns();
-  mysql_columns= table->fields;
-  if (table->primary_key == MAX_KEY)
-    ndb_columns--;
-  if (ndb_columns != mysql_columns)
-  {
-    DBUG_PRINT("error",
-               ("Wrong number of columns, ndb: %d mysql: %d",
-                ndb_columns, mysql_columns));
-    DBUG_RETURN(HA_ERR_OLD_METADATA);
-  }
   /*
     Compare FrmData in NDB with frm file from disk.
   */
@@ -652,6 +638,14 @@ int ha_ndbcluster::get_metadata(const char *path)
   if ((pack_length != tab->getFrmLength()) ||
       (memcmp(pack_data, tab->getFrmData(), pack_length)))
+  {
+    if (!invalidating_ndb_table)
+    {
+      DBUG_PRINT("info", ("Invalidating table"));
+      dict->invalidateTable(m_tabname);
+      invalidating_ndb_table= true;
+    }
+    else
   {
     DBUG_PRINT("error",
                ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
@@ -660,9 +654,17 @@ int ha_ndbcluster::get_metadata(const char *path)
     DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
     DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
     error= HA_ERR_OLD_METADATA;
+    invalidating_ndb_table= false;
+    }
+  }
+  else
+  {
+    invalidating_ndb_table= false;
+  }
   }
   my_free((char*)data, MYF(0));
   my_free((char*)pack_data, MYF(0));
+  } while (invalidating_ndb_table);
   if (error)
     DBUG_RETURN(error);