Commit ec33031e authored by unknown

ndb: bug#6451

1) fix so that missing blob tables don't prevent the table from being
    dropped
2) decrease the size of the blob part if the record length exceeds the max length
3) add test case for a table w/o corresponding blob table
4) init scan counters when sending SCAN_TABREQ


mysql-test/r/ndb_autodiscover.result:
  test case for a table w/o corresponding blob tables
mysql-test/r/ndb_autodiscover2.result:
  test case for a table w/o corresponding blob tables
mysql-test/t/ndb_autodiscover.test:
  test case for a table w/o corresponding blob tables
mysql-test/t/ndb_autodiscover2.test:
  test case for a table w/o corresponding blob tables
ndb/include/ndbapi/NdbDictionary.hpp:
  Add non-const getColumn variants
ndb/src/ndbapi/NdbDictionary.cpp:
  Add non-const getColumn variants
ndb/src/ndbapi/NdbDictionaryImpl.hpp:
  Allow a "partial" getTable, which enables dropping of tables
     that failed to create their blob tables
ndb/src/ndbapi/NdbScanOperation.cpp:
  Init counters when sending SCAN_TABREQ
sql/ha_ndbcluster.cc:
  Make sure that blobs don't have too big a part size
parent 12f1b6c6
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush status;
create table t1(
id int not null primary key,
@@ -363,3 +363,8 @@ a int NOT NULL PRIMARY KEY,
b int
) engine=ndb;
insert t9 values(1, 2), (2,3), (3, 4), (4, 5);
create table t10 (
a int not null primary key,
b blob
) engine=ndb;
insert into t10 values (1, 'kalle');
@@ -8,3 +8,6 @@ show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
drop table t9;
select * from t10;
ERROR HY000: Got error 4263 'Invalid blob attributes or invalid blob parts table' from ndbcluster
drop table t10;
-- source include/have_ndb.inc
--disable_warnings
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
--enable_warnings
################################################
@@ -472,5 +472,11 @@ system rm var/master-data/test/t9.frm ;
# MySQL Server will have been restarted because it has a
# ndb_autodiscover2-master.opt file.
create table t10 (
a int not null primary key,
b blob
) engine=ndb;
insert into t10 values (1, 'kalle');
--exec $NDB_TOOLS_DIR/ndb_drop_table -d test `$NDB_TOOLS_DIR/ndb_show_tables | grep BLOB` > /dev/null 2>&1 || true
@@ -13,4 +13,7 @@ show status like 'handler_discover%';
drop table t9;
--error 1296
select * from t10;
drop table t10;
@@ -369,7 +369,7 @@ public:
*/
bool getDistributionKey() const;
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void setTupleKey(bool);
bool getTupleKey() const;
@@ -486,6 +486,18 @@ public:
*/
const Column* getColumn(const char * name) const;
/**
 * Get column definition via index in table.
 * @return null if no column exists with the given attribute id
 */
Column* getColumn(const int attributeId);
/**
 * Get column definition via name.
 * @return null if no column exists with the given name
 */
Column* getColumn(const char * name);
/**
 * Get column definition via index in table.
 * @return null if no column exists with the given attribute id
......
@@ -343,6 +343,18 @@ NdbDictionary::Table::getColumn(const int attrId) const {
return m_impl.getColumn(attrId);
}
NdbDictionary::Column*
NdbDictionary::Table::getColumn(const char * name)
{
return m_impl.getColumn(name);
}
NdbDictionary::Column*
NdbDictionary::Table::getColumn(const int attrId)
{
return m_impl.getColumn(attrId);
}
void
NdbDictionary::Table::setLogging(bool val){
m_impl.m_logging = val;
......
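These overloads differ from the const versions only in letting the caller mutate the returned column. A minimal usage sketch (hypothetical table, not part of this commit):

  NdbDictionary::Table tab("t_example");          // hypothetical table under construction
  NdbDictionary::Column* col = tab.getColumn(0);  // non-const overload added above
  if (col != 0)
    col->setPartSize(4 * 492);                    // adjust the blob part size in place

This is what the part-size clamp in ha_ndbcluster.cc below relies on: it needs a mutable NdbDictionary::Column from a table object that is still being built.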
@@ -637,11 +637,9 @@ NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
return 0;
}
}
-if (do_add_blob_tables &&
-    info->m_table_impl->m_noOfBlobs &&
-    addBlobTables(*(info->m_table_impl))) {
-  return 0;
-}
+if (do_add_blob_tables && info->m_table_impl->m_noOfBlobs)
+  addBlobTables(*(info->m_table_impl));
return info; // autoincrement already initialized
}
......
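With the relaxed check above, a failing addBlobTables() no longer makes get_local_table_info() return 0, so a table whose blob part tables are missing can still be opened and dropped. A rough sketch against the public NDB API (connection setup and error handling omitted, names assumed):

  NdbDictionary::Dictionary* dict = ndb->getDictionary(); // ndb: an assumed connected Ndb*
  const NdbDictionary::Table* tab = dict->getTable("t10");
  if (tab != 0)
    dict->dropTable("t10");  // proceeds even if the NDB$BLOB part table is gone

Reading the blob column still fails (error 4263, as in the test result above); only discovery and drop become possible.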
@@ -850,6 +850,14 @@ NdbScanOperation::doSendScan(int aProcessorId)
tSignal = tSignal->next();
}
theStatus = WaitResponse;
m_sent_receivers_count = theParallelism;
if(m_ordered)
{
m_current_api_receiver = theParallelism;
m_api_receivers_count = theParallelism;
}
return tSignalCount;
}//NdbOperation::doSendScan()
@@ -1507,13 +1515,8 @@ NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){
m_api_receivers_count = 0;
m_current_api_receiver = 0;
-m_sent_receivers_count = parallell;
+m_sent_receivers_count = 0;
m_conf_receivers_count = 0;
-if(ordered){
-  m_current_api_receiver = parallell;
-  m_api_receivers_count = parallell;
-}
}
int
......
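Net effect of the two hunks: reset_receivers() now zeroes everything, and the counters are set to theParallelism only at the point where doSendScan() has actually handed the SCAN_TABREQ signals to the transporter, so they always match what is really outstanding. A condensed sketch of the resulting flow (structure assumed from the hunks above, not verbatim source):

  reset_receivers(parallelism, ordered);     // all counters zeroed, nothing in flight
  // ... build and send the SCAN_TABREQ signals ...
  // at the end of doSendScan(), once the signals are out:
  m_sent_receivers_count = theParallelism;   // this many receivers now await replies
  if (m_ordered) {
    m_current_api_receiver = theParallelism;
    m_api_receivers_count = theParallelism;
  }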
@@ -3307,7 +3307,7 @@ int ha_ndbcluster::create(const char *name,
{
NDBTAB tab;
NDBCOL col;
-uint pack_length, length, i;
+uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
const char **key_names= form->keynames.type_names;
char name2[FN_HEADLEN];
@@ -3354,6 +3354,8 @@ int ha_ndbcluster::create(const char *name,
if ((my_errno= create_ndb_column(col, field, info)))
DBUG_RETURN(my_errno);
tab.addColumn(col);
if(col.getPrimaryKey())
pk_length += (field->pack_length() + 3) / 4;
}
// No primary key, create shadow key as 64 bit, auto increment
@@ -3367,6 +3369,39 @@ int ha_ndbcluster::create(const char *name,
col.setPrimaryKey(TRUE);
col.setAutoIncrement(TRUE);
tab.addColumn(col);
pk_length += 2;
}
// Make sure that blob tables don't have too big a part size
for (i= 0; i < form->fields; i++)
{
/**
 * The extra +7 consists of:
 * 2 - words from pk in blob table
 * 5 - extra words added by tup/dict??
 */
switch (form->field[i]->real_type()) {
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_MEDIUM_BLOB:
case MYSQL_TYPE_LONG_BLOB:
{
NdbDictionary::Column * col = tab.getColumn(i);
int size = pk_length + (col->getPartSize()+3)/4 + 7;
if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
(pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
{
size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7;
col->setPartSize(4*size);
}
/**
* If size > NDB_MAX and pk_length+7 >= NDB_MAX
* then the table can't be created anyway, so skip
* changing part size, and have error later
*/
}
default:
break;
}
}
if ((my_errno= check_ndb_connection()))
......
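A worked instance of the clamp with illustrative numbers (the real limit NDB_MAX_TUPLE_SIZE_IN_WORDS comes from ndb_limits.h; 2000 bytes is assumed here as the default part size of a plain BLOB column): a 4-byte int primary key gives pk_length = (4+3)/4 = 1 word, so size = 1 + (2000+3)/4 + 7 = 508 words. If the limit were 500 words, the clamp would fire:

  int size = 500 - 1 /* pk_length */ - 7;  // 492 words left for the part data
  col->setPartSize(4 * size);              // part rows shrink to 1968 bytes

keeping every blob-part record within the maximum tuple size while leaving the main-table record untouched.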